#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>
/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */
static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);
/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	return inode;
}
const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};
/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
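/*
 * For reference (see include/linux/ceph/ceph_frag.h): a frag packs a
 * split depth ("bits") and a value into a single u32.  The root frag
 * ceph_frag_make(0, 0) covers the whole hash range; splitting a frag f
 * by n bits yields the 2^n children ceph_frag_make_child(f, n, i),
 * each covering an equal slice of f's range.
 */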
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}
/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);

		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}
/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
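/*
 * Example walk: with the root split into two children and one of those
 * split again, ceph_choose_frag() starts at the root and at each level
 * steps into whichever child frag contains v, stopping at the first
 * frag with no split recorded in i_fragtree -- that leaf is returned.
 */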
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
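/*
 * The delegation info cached above (frag->mds, frag->dist[]) is what
 * request routing on the client consults, via ceph_choose_frag(), to
 * send operations on a fragmented directory to the MDS that is actually
 * authoritative for the affected fragment.
 */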
/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic_set(&ci->i_release_count, 1);
	atomic_set(&ci->i_complete_count, 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}
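/*
 * Inodes are freed through RCU below because RCU-walk path lookup may
 * still be examining them without holding a reference; the slab object
 * must survive until a grace period has elapsed.
 */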
static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}
void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}
/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up-to-date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}
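/*
 * Worked example: if the MDS reports truncate_seq 3 while we hold seq 2,
 * the reported size wins even if it is smaller (a truncate happened
 * elsewhere); with equal seqs only a larger size is applied, since a
 * writer here may have extended the file past what the MDS has heard.
 */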
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
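/*
 * Worked example: an MDS time_warp_seq of 5 against our 4 means some
 * client called utimes(), so the MDS timestamps win outright; with
 * equal seqs both sides only ever moved the times forward, so taking
 * the max is safe.
 */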
/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued = 0, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_inode_frag *frag;
	struct rb_node *rb_node;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != inode->i_size))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    !__ceph_dir_is_complete(ci)) {
		dout(" marking %p complete (empty)\n", inode);
		__ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
		ci->i_max_offset = 2;
	}
no_change:
	spin_unlock(&ci->i_ceph_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		rb_erase(&frag->node, &ci->i_fragtree);
		kfree(frag);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     le32_to_cpu(info->cap.flags),
				     caps_reservation);
		} else {
			spin_lock(&ci->i_ceph_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}
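/*
 * fill_inode() is the single assimilation point for MDS-supplied inode
 * metadata: ceph_fill_trace() and the readdir prepopulation helpers
 * below all funnel through it.
 */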
/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}
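/*
 * Lease arithmetic example: a 30000 ms lease received at jiffies j gives
 * ttl = j + 30*HZ and half_ttl = j + 15*HZ; the dentry stays trusted
 * until d_time (ttl), and renewal is attempted once half_ttl has passed.
 */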
/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 *
 * Always called under directory's i_mutex.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dir->d_inode;
	struct ceph_inode_info *ci;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	ci = ceph_inode(inode);
	di = ceph_dentry(dn);

	spin_lock(&ci->i_ceph_lock);
	if (!__ceph_dir_is_complete(ci)) {
		spin_unlock(&ci->i_ceph_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&ci->i_ceph_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}
/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *  and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

	/*
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode,
					 &req->r_caps_reservation);
		}
	}

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when dir is
			 * complete.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (!dn->d_inode) {
			ihold(in);
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (dn->d_inode && dn->d_inode != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, dn->d_inode, ceph_vinop(dn->d_inode),
			     ceph_vinop(in));
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ihold(in);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}
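/*
 * Note the ordering above: inodes are filled in (and any granted caps
 * accepted) before dentries are touched, so even an aborted or empty
 * trace leaves our cap state consistent with what the MDS believes.
 */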
/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
			continue;
		}
	}

	return err;
}
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	struct ceph_dentry_info *di;
	u64 r_readdir_offset = req->r_readdir_offset;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (ceph_frag_is_leftmost(frag))
			r_readdir_offset = 2;
		else
			r_readdir_offset = 0;
	}
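	/*
	 * Entry offsets begin at 2 in the leftmost frag because readdir
	 * positions 0 and 1 are reserved for the "." and ".." entries.
	 */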
	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (!dn->d_inode)
				iput(in);
			d_drop(dn);
			goto next_item;
		}

		if (!dn->d_inode) {
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				dn = NULL;
				goto next_item;
			}
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);

		update_dentry_lease(dn, rinfo->dir_dlease[i],
				    req->r_session,
				    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	if (err == 0)
		req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}
int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}
static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}
/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}
/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);
	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}
/*
 * called by trunc_wq;
 *
 * We truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}
/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}
/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}
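/*
 * Typical flow: ceph_fill_file_size() notices a truncation while running
 * in the message handler path (which must not block), bumps
 * i_truncate_pending, and the truncate work gets queued; the worker, or
 * the next caller of __ceph_do_pending_vmtruncate(), applies it here.
 */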
/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};
/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		parent_inode = ceph_get_dentry_parent_inode(dentry);
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
		iput(parent_inode);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&ci->i_ceph_lock);
	ceph_mdsc_put_request(req);
	return err;
}
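/*
 * The pattern above repeats for each attribute: apply the change locally
 * when we hold the relevant EXCL cap (recording it in 'dirtied'),
 * otherwise pack it into the MDS request ('mask') and note which caps
 * the request should release ('release').
 */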
/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}
/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}