1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Code which implements an OCFS2 specific interface to our DLM.
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
24 */
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
30 #include <linux/kthread.h>
31 #include <linux/pagemap.h>
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include <linux/time.h>
35 #include <linux/quotaops.h>
37 #define MLOG_MASK_PREFIX ML_DLM_GLUE
38 #include <cluster/masklog.h>
41 #include "ocfs2_lockingver.h"
46 #include "extent_map.h"
48 #include "heartbeat.h"
51 #include "stackglue.h"
56 #include "refcounttree.h"
58 #include "buffer_head_io.h"
60 struct ocfs2_mask_waiter {
61 struct list_head mw_item;
62 int mw_status;
63 struct completion mw_complete;
64 unsigned long mw_mask;
65 unsigned long mw_goal;
66 #ifdef CONFIG_OCFS2_FS_STATS
67 ktime_t mw_lock_start;
68 #endif
69 };
71 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
72 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
73 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
74 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
77 * Return value from ->downconvert_worker functions.
79 * These control the precise actions of ocfs2_unblock_lock()
80 * and ocfs2_process_blocked_lock()
83 enum ocfs2_unblock_action {
84 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
85 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
86 * ->post_unlock callback */
87 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
88 * ->post_unlock() callback. */
91 struct ocfs2_unblock_ctl {
93 enum ocfs2_unblock_action unblock_action;
96 /* Lockdep class keys */
97 struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
99 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
101 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
103 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
106 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
109 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
110 struct ocfs2_lock_res *lockres);
112 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
114 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
116 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
119 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
121 /* This aids in debugging situations where a bad LVB might be involved. */
122 static void ocfs2_dump_meta_lvb_info(u64 level,
123 const char *function,
125 struct ocfs2_lock_res *lockres)
127 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
129 mlog(level, "LVB information for %s (called from %s:%u):\n",
130 lockres->l_name, function, line);
131 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
132 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
133 be32_to_cpu(lvb->lvb_igeneration));
134 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
135 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
136 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
137 be16_to_cpu(lvb->lvb_imode));
138 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
139 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
140 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
141 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
142 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
143 be32_to_cpu(lvb->lvb_iattr));
148 * OCFS2 Lock Resource Operations
150 * These fine tune the behavior of the generic dlmglue locking infrastructure.
152 * The most basic of lock types can point ->l_priv to their respective
153 * struct ocfs2_super and allow the default actions to manage things.
155 * Right now, each lock type also needs to implement an init function,
156 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
157 * should be called when the lock is no longer needed (i.e., object destruction time).
160 struct ocfs2_lock_res_ops {
162 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
163 * this callback if ->l_priv is not an ocfs2_super pointer
165 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
168 * Optionally called in the downconvert thread after a
169 * successful downconvert. The lockres will not be referenced
170 * after this callback is called, so it is safe to free memory, etc.
173 * The exact semantics of when this is called are controlled
174 * by ->downconvert_worker()
176 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
179 * Allow a lock type to add checks to determine whether it is
180 * safe to downconvert a lock. Return 0 to re-queue the
181 * downconvert at a later time, nonzero to continue.
183 * For most locks, the default checks that there are no
184 * incompatible holders are sufficient.
186 * Called with the lockres spinlock held.
188 int (*check_downconvert)(struct ocfs2_lock_res *, int);
191 * Allows a lock type to populate the lock value block. This
192 * is called on downconvert, and when we drop a lock.
194 * Locks that want to use this should set LOCK_TYPE_USES_LVB
195 * in the flags field.
197 * Called with the lockres spinlock held.
199 void (*set_lvb)(struct ocfs2_lock_res *);
202 * Called from the downconvert thread when it is determined
203 * that a lock will be downconverted. This is called without
204 * any locks held so the function can do work that might
205 * schedule (syncing out data, etc).
207 * This should return any one of the ocfs2_unblock_action
208 * values, depending on what it wants the thread to do.
210 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
213 * LOCK_TYPE_* flags which describe the specific requirements
214 * of a lock type. Descriptions of each individual flag follow.
220 * Some locks want to "refresh" potentially stale data when a
221 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
222 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
223 * individual lockres l_flags member from the ast function. It is
224 * expected that the locking wrapper will clear the
225 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
227 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
230 * Indicate that a lock type makes use of the lock value block. The
231 * ->set_lvb lock type callback must be defined.
233 #define LOCK_TYPE_USES_LVB 0x2
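/*
 * For illustration: the simplest lock types below (e.g. ocfs2_inode_rw_lops
 * and ocfs2_super_lops) wire up little more than ->get_osb or a flag, while
 * the inode metadata type (ocfs2_inode_inode_lops) uses every callback plus
 * both flags. A sketch of a minimal new lock type would be:
 *
 *	static struct ocfs2_lock_res_ops ocfs2_example_lops = {
 *		.get_osb = ocfs2_get_example_osb,
 *		.flags   = 0,
 *	};
 *
 * (ocfs2_example_lops and ocfs2_get_example_osb are hypothetical names used
 * only for this sketch.)
 */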
235 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
236 .get_osb = ocfs2_get_inode_osb,
240 static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
241 .get_osb = ocfs2_get_inode_osb,
242 .check_downconvert = ocfs2_check_meta_downconvert,
243 .set_lvb = ocfs2_set_meta_lvb,
244 .downconvert_worker = ocfs2_data_convert_worker,
245 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
248 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
249 .flags = LOCK_TYPE_REQUIRES_REFRESH,
252 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
256 static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
260 static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
261 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
264 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
265 .get_osb = ocfs2_get_dentry_osb,
266 .post_unlock = ocfs2_dentry_post_unlock,
267 .downconvert_worker = ocfs2_dentry_convert_worker,
271 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
272 .get_osb = ocfs2_get_inode_osb,
276 static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
277 .get_osb = ocfs2_get_file_osb,
281 static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
282 .set_lvb = ocfs2_set_qinfo_lvb,
283 .get_osb = ocfs2_get_qinfo_osb,
284 .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
287 static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
288 .check_downconvert = ocfs2_check_refcount_downconvert,
289 .downconvert_worker = ocfs2_refcount_convert_worker,
293 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
295 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
296 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
297 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
300 static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
302 return container_of(lksb, struct ocfs2_lock_res, l_lksb);
305 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
307 BUG_ON(!ocfs2_is_inode_lock(lockres));
309 return (struct inode *) lockres->l_priv;
312 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
314 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
316 return (struct ocfs2_dentry_lock *)lockres->l_priv;
319 static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
321 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
323 return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
326 static inline struct ocfs2_refcount_tree *
327 ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
329 return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
332 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
334 if (lockres->l_ops->get_osb)
335 return lockres->l_ops->get_osb(lockres);
337 return (struct ocfs2_super *)lockres->l_priv;
340 static int ocfs2_lock_create(struct ocfs2_super *osb,
341 struct ocfs2_lock_res *lockres,
344 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
346 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
347 struct ocfs2_lock_res *lockres,
348 int level, unsigned long caller_ip);
349 static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
350 struct ocfs2_lock_res *lockres,
353 __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
356 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
357 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
358 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
359 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
360 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
361 struct ocfs2_lock_res *lockres);
362 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
364 #define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
365 if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
366 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
367 _err, _func, _lockres->l_name); \
369 mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
370 _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
371 (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
373 static int ocfs2_downconvert_thread(void *arg);
374 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
375 struct ocfs2_lock_res *lockres);
376 static int ocfs2_inode_lock_update(struct inode *inode,
377 struct buffer_head **bh);
378 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
379 static inline int ocfs2_highest_compat_lock_level(int level);
380 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
382 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
383 struct ocfs2_lock_res *lockres,
386 unsigned int generation);
387 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
388 struct ocfs2_lock_res *lockres);
389 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
390 struct ocfs2_lock_res *lockres);
393 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
400 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
402 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
403 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
404 (long long)blkno, generation);
406 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
408 mlog(0, "built lock resource with name: %s\n", name);
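/*
 * Layout note, derived from the format string above: a standard lock name is
 * the single lock type character, then OCFS2_LOCK_ID_PAD, then sixteen hex
 * digits of block number and eight hex digits of generation -- exactly
 * OCFS2_LOCK_ID_MAX_LEN - 1 characters plus the terminating NUL.
 */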
411 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
413 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
414 struct ocfs2_dlm_debug *dlm_debug)
416 mlog(0, "Add tracking for lockres %s\n", res->l_name);
418 spin_lock(&ocfs2_dlm_tracking_lock);
419 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
420 spin_unlock(&ocfs2_dlm_tracking_lock);
423 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
425 spin_lock(&ocfs2_dlm_tracking_lock);
426 if (!list_empty(&res->l_debug_list))
427 list_del_init(&res->l_debug_list);
428 spin_unlock(&ocfs2_dlm_tracking_lock);
431 #ifdef CONFIG_OCFS2_FS_STATS
432 static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
434 res->l_lock_refresh = 0;
435 memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
436 memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
439 static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
440 struct ocfs2_mask_waiter *mw, int ret)
444 struct ocfs2_lock_stats *stats;
446 if (level == LKM_PRMODE)
447 stats = &res->l_lock_prmode;
448 else if (level == LKM_EXMODE)
449 stats = &res->l_lock_exmode;
453 kt = ktime_sub(ktime_get(), mw->mw_lock_start);
454 usec = ktime_to_us(kt);
457 stats->ls_total += ktime_to_ns(kt);
459 if (unlikely(stats->ls_gets == 0)) {
461 stats->ls_total = ktime_to_ns(kt);
464 if (stats->ls_max < usec)
465 stats->ls_max = usec;
471 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
473 lockres->l_lock_refresh++;
476 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
478 mw->mw_lock_start = ktime_get();
481 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
484 static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
485 int level, struct ocfs2_mask_waiter *mw, int ret)
488 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
491 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
496 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
497 struct ocfs2_lock_res *res,
498 enum ocfs2_lock_type type,
499 struct ocfs2_lock_res_ops *ops,
506 res->l_level = DLM_LOCK_IV;
507 res->l_requested = DLM_LOCK_IV;
508 res->l_blocking = DLM_LOCK_IV;
509 res->l_action = OCFS2_AST_INVALID;
510 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
512 res->l_flags = OCFS2_LOCK_INITIALIZED;
514 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
516 ocfs2_init_lock_stats(res);
517 #ifdef CONFIG_DEBUG_LOCK_ALLOC
518 if (type != OCFS2_LOCK_TYPE_OPEN)
519 lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
520 &lockdep_keys[type], 0);
522 res->l_lockdep_map.key = NULL;
526 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
528 /* This also clears out the lock status block */
529 memset(res, 0, sizeof(struct ocfs2_lock_res));
530 spin_lock_init(&res->l_lock);
531 init_waitqueue_head(&res->l_event);
532 INIT_LIST_HEAD(&res->l_blocked_list);
533 INIT_LIST_HEAD(&res->l_mask_waiters);
536 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
537 enum ocfs2_lock_type type,
538 unsigned int generation,
541 struct ocfs2_lock_res_ops *ops;
544 case OCFS2_LOCK_TYPE_RW:
545 ops = &ocfs2_inode_rw_lops;
547 case OCFS2_LOCK_TYPE_META:
548 ops = &ocfs2_inode_inode_lops;
550 case OCFS2_LOCK_TYPE_OPEN:
551 ops = &ocfs2_inode_open_lops;
554 mlog_bug_on_msg(1, "type: %d\n", type);
555 ops = NULL; /* thanks, gcc */
559 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
560 generation, res->l_name);
561 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
564 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
566 struct inode *inode = ocfs2_lock_res_inode(lockres);
568 return OCFS2_SB(inode->i_sb);
571 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
573 struct ocfs2_mem_dqinfo *info = lockres->l_priv;
575 return OCFS2_SB(info->dqi_gi.dqi_sb);
578 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
580 struct ocfs2_file_private *fp = lockres->l_priv;
582 return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
585 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
587 __be64 inode_blkno_be;
589 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
592 return be64_to_cpu(inode_blkno_be);
595 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
597 struct ocfs2_dentry_lock *dl = lockres->l_priv;
599 return OCFS2_SB(dl->dl_inode->i_sb);
602 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
603 u64 parent, struct inode *inode)
606 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
607 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
608 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
610 ocfs2_lock_res_init_once(lockres);
613 * Unfortunately, the standard lock naming scheme won't work
614 * here because we have two 16 byte values to use. Instead,
615 * we'll stuff the inode number as a binary value. We still
616 * want error prints to show something without garbling the
617 * display, so drop a null byte in there before the inode
618 * number. A future version of OCFS2 will likely use all
619 * binary lock names. The stringified names have been a
620 * tremendous aid in debugging, but now that the debugfs
621 * interface exists, we can mangle things there if need be.
623 * NOTE: We also drop the standard "pad" value (the total lock
624 * name size stays the same though - the last part is all
625 * zeros due to the memset in ocfs2_lock_res_init_once()
627 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
628 "%c%016llx",
629 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
630 (long long)parent);
632 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
634 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
637 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
638 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
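/*
 * Layout of the dentry lock name built above: the first
 * OCFS2_DENTRY_LOCK_INO_START bytes hold the type character and the parent
 * block number as a hex string terminated by a NUL (no pad), and the bytes
 * starting at OCFS2_DENTRY_LOCK_INO_START carry the child inode block number
 * as a raw big-endian u64 -- exactly what ocfs2_get_dentry_lock_ino() reads
 * back out.
 */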
642 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
643 struct ocfs2_super *osb)
645 /* Superblock lockres doesn't come from a slab so we call init
646 * once on it manually. */
647 ocfs2_lock_res_init_once(res);
648 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
650 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
651 &ocfs2_super_lops, osb);
654 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
655 struct ocfs2_super *osb)
657 /* Rename lockres doesn't come from a slab so we call init
658 * once on it manually. */
659 ocfs2_lock_res_init_once(res);
660 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
661 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
662 &ocfs2_rename_lops, osb);
665 static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
666 struct ocfs2_super *osb)
668 /* nfs_sync lockres doesn't come from a slab so we call init
669 * once on it manually. */
670 ocfs2_lock_res_init_once(res);
671 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
672 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
673 &ocfs2_nfs_sync_lops, osb);
676 static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
677 struct ocfs2_super *osb)
679 ocfs2_lock_res_init_once(res);
680 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
681 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
682 &ocfs2_orphan_scan_lops, osb);
685 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
686 struct ocfs2_file_private *fp)
688 struct inode *inode = fp->fp_file->f_mapping->host;
689 struct ocfs2_inode_info *oi = OCFS2_I(inode);
691 ocfs2_lock_res_init_once(lockres);
692 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
693 inode->i_generation, lockres->l_name);
694 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
695 OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
697 lockres->l_flags |= OCFS2_LOCK_NOCACHE;
700 void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
701 struct ocfs2_mem_dqinfo *info)
703 ocfs2_lock_res_init_once(lockres);
704 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
706 ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
707 OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
711 void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
712 struct ocfs2_super *osb, u64 ref_blkno,
713 unsigned int generation)
715 ocfs2_lock_res_init_once(lockres);
716 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
717 generation, lockres->l_name);
718 ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
719 &ocfs2_refcount_block_lops, osb);
722 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
724 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
727 ocfs2_remove_lockres_tracking(res);
729 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
730 "Lockres %s is on the blocked list\n",
732 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
733 "Lockres %s has mask waiters pending\n",
735 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
736 "Lockres %s is locked\n",
738 mlog_bug_on_msg(res->l_ro_holders,
739 "Lockres %s has %u ro holders\n",
740 res->l_name, res->l_ro_holders);
741 mlog_bug_on_msg(res->l_ex_holders,
742 "Lockres %s has %u ex holders\n",
743 res->l_name, res->l_ex_holders);
745 /* Need to clear out the lock status block for the dlm */
746 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
751 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
758 lockres->l_ex_holders++;
761 lockres->l_ro_holders++;
768 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
775 BUG_ON(!lockres->l_ex_holders);
776 lockres->l_ex_holders--;
779 BUG_ON(!lockres->l_ro_holders);
780 lockres->l_ro_holders--;
787 /* WARNING: This function lives in a world where the only three lock
788 * levels are EX, PR, and NL. It *will* have to be adjusted when more
789 * lock types are added. */
790 static inline int ocfs2_highest_compat_lock_level(int level)
792 int new_level = DLM_LOCK_EX;
794 if (level == DLM_LOCK_EX)
795 new_level = DLM_LOCK_NL;
796 else if (level == DLM_LOCK_PR)
797 new_level = DLM_LOCK_PR;
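/*
 * Put differently: a blocking request for EX leaves NL as the highest level
 * we may keep, a request for PR leaves PR, and anything else (NL) is
 * compatible with holding EX -- hence the DLM_LOCK_EX default above.
 */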
801 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
802 unsigned long newflags)
804 struct ocfs2_mask_waiter *mw, *tmp;
806 assert_spin_locked(&lockres->l_lock);
808 lockres->l_flags = newflags;
810 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
811 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
814 list_del_init(&mw->mw_item);
816 complete(&mw->mw_complete);
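/*
 * Note that every flag update above also serves as the wakeup path for
 * lockres_add_mask_waiter() callers: a waiter completes as soon as
 * (l_flags & mw_mask) == mw_goal, which is how __ocfs2_cluster_lock() waits
 * for OCFS2_LOCK_BUSY or OCFS2_LOCK_BLOCKED to clear (goal of 0).
 */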
819 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
821 lockres_set_flags(lockres, lockres->l_flags | or);
823 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
826 lockres_set_flags(lockres, lockres->l_flags & ~clear);
829 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
831 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
832 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
833 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
834 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
836 lockres->l_level = lockres->l_requested;
837 if (lockres->l_level <=
838 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
839 lockres->l_blocking = DLM_LOCK_NL;
840 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
842 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
845 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
847 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
848 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
850 /* Convert from RO to EX doesn't really need anything as our
851 * information is already up to date. Convert from NL to
852 * *anything* however should mark ourselves as needing an
853 * update of the caches. */
854 if (lockres->l_level == DLM_LOCK_NL &&
855 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
856 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
858 lockres->l_level = lockres->l_requested;
861 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
862 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
863 * downconverting the lock before the upconvert has fully completed.
865 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
867 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
870 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
872 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
873 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
875 if (lockres->l_requested > DLM_LOCK_NL &&
876 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
877 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
878 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
880 lockres->l_level = lockres->l_requested;
881 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
882 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
885 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
888 int needs_downconvert = 0;
890 assert_spin_locked(&lockres->l_lock);
892 if (level > lockres->l_blocking) {
893 /* only schedule a downconvert if we haven't already scheduled
894 * one that goes low enough to satisfy the level we're
895 * blocking. this also catches the case where we get duplicate BASTs */
897 if (ocfs2_highest_compat_lock_level(level) <
898 ocfs2_highest_compat_lock_level(lockres->l_blocking))
899 needs_downconvert = 1;
901 lockres->l_blocking = level;
904 mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
905 lockres->l_name, level, lockres->l_level, lockres->l_blocking,
908 if (needs_downconvert)
909 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
910 mlog(0, "needs_downconvert = %d\n", needs_downconvert);
911 return needs_downconvert;
915 * OCFS2_LOCK_PENDING and l_pending_gen.
917 * Why does OCFS2_LOCK_PENDING exist? To close a race between setting
918 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
919 * for more details on the race.
921 * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
922 * a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock()
923 * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
924 * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
925 * the caller is going to try to clear PENDING again. If nothing else is
926 * happening, __lockres_clear_pending() sees PENDING is unset and does
929 * But what if another path (eg downconvert thread) has just started a
930 * new locking action? The other path has re-set PENDING. Our path
931 * cannot clear PENDING, because that will re-open the original race window.
 * The race, step by step:
 *
 * ocfs2_cluster_lock()
 *  sets BUSY and PENDING, drops l_lock, calls ocfs2_dlm_lock()
 *
 * ocfs2_locking_ast()
 *  fires before ocfs2_dlm_lock() returns and clears PENDING
 *
 * ocfs2_downconvert_thread() -> ocfs2_unblock_lock()
 *  sees the lock is no longer busy
 *  ocfs2_prepare_downconvert() sets BUSY and PENDING again,
 *  drops l_lock and prepares to call ocfs2_dlm_lock()
 *
 * ocfs2_cluster_lock()
 *  returns from its ocfs2_dlm_lock() call and clears PENDING
956 * So as you can see, we now have a window where l_lock is not held,
957 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
959 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
960 * set by ocfs2_prepare_downconvert(). That wasn't nice.
962 * To solve this we introduce l_pending_gen. A call to
963 * lockres_clear_pending() will only do so when it is passed a generation
964 * number that matches the lockres. lockres_set_pending() will return the
965 * current generation number. When ocfs2_cluster_lock() goes to clear
966 * PENDING, it passes the generation it got from set_pending(). In our
967 * example above, the generation numbers will *not* match. Thus,
968 * ocfs2_cluster_lock() will not clear the PENDING set by
969 * ocfs2_prepare_downconvert().
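 *
 * Roughly, the pattern every ocfs2_dlm_lock() caller in this file follows
 * (see ocfs2_lock_create() below for the simplest instance) is:
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
 *	gen = lockres_set_pending(lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *
 *	ret = ocfs2_dlm_lock(...);
 *	lockres_clear_pending(lockres, gen, osb);
 *
 * A stale generation from a racing path simply turns the clear into a no-op.
 */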
972 /* Unlocked version for ocfs2_locking_ast() */
973 static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
974 unsigned int generation,
975 struct ocfs2_super *osb)
977 assert_spin_locked(&lockres->l_lock);
980 * The ast and locking functions can race us here. The winner
981 * will clear pending, the loser will not.
983 if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
984 (lockres->l_pending_gen != generation))
987 lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
988 lockres->l_pending_gen++;
991 * The downconvert thread may have skipped us because we
992 * were PENDING. Wake it up.
994 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
995 ocfs2_wake_downconvert_thread(osb);
998 /* Locked version for callers of ocfs2_dlm_lock() */
999 static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
1000 unsigned int generation,
1001 struct ocfs2_super *osb)
1003 unsigned long flags;
1005 spin_lock_irqsave(&lockres->l_lock, flags);
1006 __lockres_clear_pending(lockres, generation, osb);
1007 spin_unlock_irqrestore(&lockres->l_lock, flags);
1010 static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1012 assert_spin_locked(&lockres->l_lock);
1013 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
1015 lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
1017 return lockres->l_pending_gen;
1020 static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
1022 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1023 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1024 int needs_downconvert;
1025 unsigned long flags;
1027 BUG_ON(level <= DLM_LOCK_NL);
1029 mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
1030 "type %s\n", lockres->l_name, level, lockres->l_level,
1031 ocfs2_lock_type_string(lockres->l_type));
1034 * We can skip the bast for locks which don't enable caching -
1035 * they'll be dropped at the earliest possible time anyway.
1037 if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
1040 spin_lock_irqsave(&lockres->l_lock, flags);
1041 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
1042 if (needs_downconvert)
1043 ocfs2_schedule_blocked_lock(osb, lockres);
1044 spin_unlock_irqrestore(&lockres->l_lock, flags);
1046 wake_up(&lockres->l_event);
1048 ocfs2_wake_downconvert_thread(osb);
1051 static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
1053 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1054 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1055 unsigned long flags;
1058 spin_lock_irqsave(&lockres->l_lock, flags);
1060 status = ocfs2_dlm_lock_status(&lockres->l_lksb);
1062 if (status == -EAGAIN) {
1063 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1068 mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
1069 lockres->l_name, status);
1070 spin_unlock_irqrestore(&lockres->l_lock, flags);
1074 mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
1075 "level %d => %d\n", lockres->l_name, lockres->l_action,
1076 lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
1078 switch(lockres->l_action) {
1079 case OCFS2_AST_ATTACH:
1080 ocfs2_generic_handle_attach_action(lockres);
1081 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
1083 case OCFS2_AST_CONVERT:
1084 ocfs2_generic_handle_convert_action(lockres);
1086 case OCFS2_AST_DOWNCONVERT:
1087 ocfs2_generic_handle_downconvert_action(lockres);
1090 mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
1091 "flags 0x%lx, unlock: %u\n",
1092 lockres->l_name, lockres->l_action, lockres->l_flags,
1093 lockres->l_unlock_action);
1097 /* set it to something invalid so if we get called again we can catch it. */
1099 lockres->l_action = OCFS2_AST_INVALID;
1101 /* Did we try to cancel this lock? Clear that state */
1102 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
1103 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1106 * We may have beaten the locking functions here. We certainly
1107 * know that dlm_lock() has been called :-)
1108 * Because we can't have two lock calls in flight at once, we
1109 * can use lockres->l_pending_gen.
1111 __lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
1113 wake_up(&lockres->l_event);
1114 spin_unlock_irqrestore(&lockres->l_lock, flags);
1117 static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1119 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1120 unsigned long flags;
1122 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
1123 lockres->l_name, lockres->l_unlock_action);
1125 spin_lock_irqsave(&lockres->l_lock, flags);
1127 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
1128 "unlock_action %d\n", error, lockres->l_name,
1129 lockres->l_unlock_action);
1130 spin_unlock_irqrestore(&lockres->l_lock, flags);
1134 switch(lockres->l_unlock_action) {
1135 case OCFS2_UNLOCK_CANCEL_CONVERT:
1136 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
1137 lockres->l_action = OCFS2_AST_INVALID;
1138 /* Downconvert thread may have requeued this lock, we
1139 * need to wake it. */
1140 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1141 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
1143 case OCFS2_UNLOCK_DROP_LOCK:
1144 lockres->l_level = DLM_LOCK_IV;
1150 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1151 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1152 wake_up(&lockres->l_event);
1153 spin_unlock_irqrestore(&lockres->l_lock, flags);
1157 * This is the filesystem locking protocol. It provides the lock handling
1158 * hooks for the underlying DLM. It has a maximum version number.
1159 * The version number allows interoperability with systems running at
1160 * the same major number and an equal or smaller minor number.
1162 * Whenever the filesystem does new things with locks (adds or removes a
1163 * lock, orders them differently, does different things underneath a lock),
1164 * the version must be changed. The protocol is negotiated when joining
1165 * the dlm domain. A node may join the domain if its major version is
1166 * identical to all other nodes and its minor version is greater than
1167 * or equal to all other nodes. When its minor version is greater than
1168 * the other nodes, it will run at the minor version specified by the other nodes.
1171 * If a locking change is made that will not be compatible with older
1172 * versions, the major number must be increased and the minor version set
1173 * to zero. If a change merely adds a behavior that can be disabled when
1174 * speaking to older versions, the minor version must be increased. If a
1175 * change adds a fully backwards compatible change (eg, LVB changes that
1176 * are just ignored by older versions), the version does not need to be changed.
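 *
 * As an illustrative example (not taken from the negotiation code itself):
 * a node speaking protocol 1.2 may join a domain of 1.1 nodes and will then
 * run at 1.1, while a node with a different major version cannot join at all.
 */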
1179 static struct ocfs2_locking_protocol lproto = {
1181 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
1182 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
1184 .lp_lock_ast = ocfs2_locking_ast,
1185 .lp_blocking_ast = ocfs2_blocking_ast,
1186 .lp_unlock_ast = ocfs2_unlock_ast,
1189 void ocfs2_set_locking_protocol(void)
1191 ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
1194 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1197 unsigned long flags;
1199 spin_lock_irqsave(&lockres->l_lock, flags);
1200 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1201 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1203 lockres->l_action = OCFS2_AST_INVALID;
1205 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1206 spin_unlock_irqrestore(&lockres->l_lock, flags);
1208 wake_up(&lockres->l_event);
1211 /* Note: If we detect another process working on the lock (i.e.,
1212 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
1213 * to do the right thing in that case.
1215 static int ocfs2_lock_create(struct ocfs2_super *osb,
1216 struct ocfs2_lock_res *lockres,
1221 unsigned long flags;
1224 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1227 spin_lock_irqsave(&lockres->l_lock, flags);
1228 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
1229 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
1230 spin_unlock_irqrestore(&lockres->l_lock, flags);
1234 lockres->l_action = OCFS2_AST_ATTACH;
1235 lockres->l_requested = level;
1236 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1237 gen = lockres_set_pending(lockres);
1238 spin_unlock_irqrestore(&lockres->l_lock, flags);
1240 ret = ocfs2_dlm_lock(osb->cconn,
1245 OCFS2_LOCK_ID_MAX_LEN - 1);
1246 lockres_clear_pending(lockres, gen, osb);
1248 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1249 ocfs2_recover_from_dlm_error(lockres, 1);
1252 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1258 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
1261 unsigned long flags;
1264 spin_lock_irqsave(&lockres->l_lock, flags);
1265 ret = lockres->l_flags & flag;
1266 spin_unlock_irqrestore(&lockres->l_lock, flags);
1271 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
1274 wait_event(lockres->l_event,
1275 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
1278 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
1281 wait_event(lockres->l_event,
1282 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
1285 /* predict what lock level we'll be dropping down to on behalf
1286 * of another node, and return true if the currently wanted
1287 * level will be compatible with it. */
1288 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
1291 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
1293 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
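/*
 * Example: if another node asked for EX (l_blocking == DLM_LOCK_EX) we will
 * eventually drop to NL, so only a DLM_LOCK_NL request may continue here; if
 * it asked for PR we will drop to PR, so PR requests can still proceed.
 */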
1296 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
1298 INIT_LIST_HEAD(&mw->mw_item);
1299 init_completion(&mw->mw_complete);
1300 ocfs2_init_start_time(mw);
1303 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
1305 wait_for_completion(&mw->mw_complete);
1306 /* Re-arm the completion in case we want to wait on it again */
1307 INIT_COMPLETION(mw->mw_complete);
1308 return mw->mw_status;
1311 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
1312 struct ocfs2_mask_waiter *mw,
1316 BUG_ON(!list_empty(&mw->mw_item));
1318 assert_spin_locked(&lockres->l_lock);
1320 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
1325 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
1326 * if the mask still hadn't reached its goal */
1327 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1328 struct ocfs2_mask_waiter *mw)
1330 unsigned long flags;
1333 spin_lock_irqsave(&lockres->l_lock, flags);
1334 if (!list_empty(&mw->mw_item)) {
1335 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
1338 list_del_init(&mw->mw_item);
1339 init_completion(&mw->mw_complete);
1341 spin_unlock_irqrestore(&lockres->l_lock, flags);
1347 static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1348 struct ocfs2_lock_res *lockres)
1352 ret = wait_for_completion_interruptible(&mw->mw_complete);
1354 lockres_remove_mask_waiter(lockres, mw);
1356 ret = mw->mw_status;
1357 /* Re-arm the completion in case we want to wait on it again */
1358 INIT_COMPLETION(mw->mw_complete);
1362 static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1363 struct ocfs2_lock_res *lockres,
1368 unsigned long caller_ip)
1370 struct ocfs2_mask_waiter mw;
1371 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
1372 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
1373 unsigned long flags;
1375 int noqueue_attempted = 0;
1377 ocfs2_init_mask_waiter(&mw);
1379 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
1380 lkm_flags |= DLM_LKF_VALBLK;
1385 spin_lock_irqsave(&lockres->l_lock, flags);
1387 if (catch_signals && signal_pending(current)) {
1392 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1393 "Cluster lock called on freeing lockres %s! flags "
1394 "0x%lx\n", lockres->l_name, lockres->l_flags);
1396 /* We only compare against the currently granted level
1397 * here. If the lock is blocked waiting on a downconvert,
1398 * we'll get caught below. */
1399 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
1400 level > lockres->l_level) {
1401 /* is someone sitting in dlm_lock? If so, wait on them. */
1403 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1408 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
1410 * We've upconverted. If the lock now has a level we can
1411 * work with, we take it. If, however, the lock is not at the
1412 * required level, we go thru the full cycle. One way this could
1413 * happen is if a process requesting an upconvert to PR is
1414 * closely followed by another requesting upconvert to an EX.
1415 * If the process requesting EX lands here, we want it to
1416 * continue attempting to upconvert and let the process
1417 * requesting PR take the lock.
1418 * If multiple processes request upconvert to PR, the first one
1419 * here will take the lock. The others will have to go thru the
1420 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
1421 * downconvert request.
1423 if (level <= lockres->l_level)
1424 goto update_holders;
1427 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1428 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1429 /* is the lock currently blocked on behalf of another node? */
1431 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
1436 if (level > lockres->l_level) {
1437 if (noqueue_attempted > 0) {
1441 if (lkm_flags & DLM_LKF_NOQUEUE)
1442 noqueue_attempted = 1;
1444 if (lockres->l_action != OCFS2_AST_INVALID)
1445 mlog(ML_ERROR, "lockres %s has action %u pending\n",
1446 lockres->l_name, lockres->l_action);
1448 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1449 lockres->l_action = OCFS2_AST_ATTACH;
1450 lkm_flags &= ~DLM_LKF_CONVERT;
1452 lockres->l_action = OCFS2_AST_CONVERT;
1453 lkm_flags |= DLM_LKF_CONVERT;
1456 lockres->l_requested = level;
1457 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1458 gen = lockres_set_pending(lockres);
1459 spin_unlock_irqrestore(&lockres->l_lock, flags);
1461 BUG_ON(level == DLM_LOCK_IV);
1462 BUG_ON(level == DLM_LOCK_NL);
1464 mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
1465 lockres->l_name, lockres->l_level, level);
1467 /* call dlm_lock to upgrade lock now */
1468 ret = ocfs2_dlm_lock(osb->cconn,
1473 OCFS2_LOCK_ID_MAX_LEN - 1);
1474 lockres_clear_pending(lockres, gen, osb);
1476 if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
1478 ocfs2_log_dlm_error("ocfs2_dlm_lock",
1481 ocfs2_recover_from_dlm_error(lockres, 1);
1485 mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1488 /* At this point we've gone inside the dlm and need to
1489 * complete our work regardless. */
1492 /* wait for busy to clear and carry on */
1497 /* Ok, if we get here then we're good to go. */
1498 ocfs2_inc_holders(lockres, level);
1502 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1504 spin_unlock_irqrestore(&lockres->l_lock, flags);
1507 * This is helping work around a lock inversion between the page lock
1508 * and dlm locks. One path holds the page lock while calling aops
1509 * which block acquiring dlm locks. The voting thread holds dlm
1510 * locks while acquiring page locks while down converting data locks.
1511 * This block is helping an aop path notice the inversion and back
1512 * off to unlock its page lock before trying the dlm lock again.
1514 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1515 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1517 if (lockres_remove_mask_waiter(lockres, &mw))
1523 ret = ocfs2_wait_for_mask(&mw);
1528 ocfs2_update_lock_stats(lockres, level, &mw, ret);
1530 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1531 if (!ret && lockres->l_lockdep_map.key != NULL) {
1532 if (level == DLM_LOCK_PR)
1533 rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1534 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1537 rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1538 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1545 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1546 struct ocfs2_lock_res *lockres,
1551 return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1556 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1557 struct ocfs2_lock_res *lockres,
1559 unsigned long caller_ip)
1561 unsigned long flags;
1563 spin_lock_irqsave(&lockres->l_lock, flags);
1564 ocfs2_dec_holders(lockres, level);
1565 ocfs2_downconvert_on_unlock(osb, lockres);
1566 spin_unlock_irqrestore(&lockres->l_lock, flags);
1567 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1568 if (lockres->l_lockdep_map.key != NULL)
1569 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1573 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1574 struct ocfs2_lock_res *lockres,
1578 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1579 unsigned long flags;
1580 u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
1582 spin_lock_irqsave(&lockres->l_lock, flags);
1583 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1584 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1585 spin_unlock_irqrestore(&lockres->l_lock, flags);
1587 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1590 /* Grants us an EX lock on the data and metadata resources, skipping
1591 * the normal cluster directory lookup. Use this ONLY on newly created
1592 * inodes which other nodes can't possibly see, and which haven't been
1593 * hashed in the inode hash yet. This can give us a good performance
1594 * increase as it'll skip the network broadcast normally associated
1595 * with creating a new lock resource. */
1596 int ocfs2_create_new_inode_locks(struct inode *inode)
1599 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1602 BUG_ON(!ocfs2_inode_is_new(inode));
1604 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1606 /* NOTE: We don't increment any of the holder counts, nor
1607 * do we add anything to a journal handle. Since this is
1608 * supposed to be a new inode which the cluster doesn't know
1609 * about yet, there is no need to. As far as the LVB handling
1610 * is concerned, this is basically like acquiring an EX lock
1611 * on a resource which has an invalid one -- we'll set it
1612 * valid when we release the EX. */
1614 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1621 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
1622 * don't use a generation in their lock names.
1624 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1630 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1640 int ocfs2_rw_lock(struct inode *inode, int write)
1643 struct ocfs2_lock_res *lockres;
1644 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1648 mlog(0, "inode %llu take %s RW lock\n",
1649 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1650 write ? "EXMODE" : "PRMODE");
1652 if (ocfs2_mount_local(osb))
1655 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1657 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1659 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1667 void ocfs2_rw_unlock(struct inode *inode, int write)
1669 int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1670 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1671 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1673 mlog(0, "inode %llu drop %s RW lock\n",
1674 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1675 write ? "EXMODE" : "PRMODE");
1677 if (!ocfs2_mount_local(osb))
1678 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1682 * ocfs2_open_lock always gets a PR mode lock.
1684 int ocfs2_open_lock(struct inode *inode)
1687 struct ocfs2_lock_res *lockres;
1688 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1692 mlog(0, "inode %llu take PRMODE open lock\n",
1693 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1695 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1698 lockres = &OCFS2_I(inode)->ip_open_lockres;
1700 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1709 int ocfs2_try_open_lock(struct inode *inode, int write)
1711 int status = 0, level;
1712 struct ocfs2_lock_res *lockres;
1713 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1717 mlog(0, "inode %llu try to take %s open lock\n",
1718 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1719 write ? "EXMODE" : "PRMODE");
1721 if (ocfs2_is_hard_readonly(osb)) {
1727 if (ocfs2_mount_local(osb))
1730 lockres = &OCFS2_I(inode)->ip_open_lockres;
1732 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1735 * The file system may already be holding a PRMODE/EXMODE open lock.
1736 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
1737 * other nodes and the -EAGAIN will indicate to the caller that
1738 * this inode is still in use.
1740 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1741 level, DLM_LKF_NOQUEUE, 0);
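/*
 * (For example, the inode wipe path uses this trylock to decide whether any
 * other node still has the inode open; -EAGAIN there means the inode is
 * still in use somewhere in the cluster.)
 */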
1748 * ocfs2_open_unlock unlocks PR and EX mode open locks.
1750 void ocfs2_open_unlock(struct inode *inode)
1752 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1753 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1755 mlog(0, "inode %llu drop open lock\n",
1756 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1758 if (ocfs2_mount_local(osb))
1761 if (lockres->l_ro_holders)
1762 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1763 DLM_LOCK_PR);
1764 if (lockres->l_ex_holders)
1765 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1766 DLM_LOCK_EX);
1772 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1776 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1777 unsigned long flags;
1778 struct ocfs2_mask_waiter mw;
1780 ocfs2_init_mask_waiter(&mw);
1783 spin_lock_irqsave(&lockres->l_lock, flags);
1784 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1785 ret = ocfs2_prepare_cancel_convert(osb, lockres);
1787 spin_unlock_irqrestore(&lockres->l_lock, flags);
1788 ret = ocfs2_cancel_convert(osb, lockres);
1795 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1796 spin_unlock_irqrestore(&lockres->l_lock, flags);
1798 ocfs2_wait_for_mask(&mw);
1804 * We may still have gotten the lock, in which case there's no
1805 * point to restarting the syscall.
1807 if (lockres->l_level == level)
1810 mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1811 lockres->l_flags, lockres->l_level, lockres->l_action);
1813 spin_unlock_irqrestore(&lockres->l_lock, flags);
1820 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1821 * flock() calls. The locking approach this requires is sufficiently
1822 * different from all other cluster lock types that we implement a
1823 * separate path to the "low-level" dlm calls. In particular:
1825 * - No optimization of lock levels is done - we take exactly
1826 * what's been requested.
1828 * - No lock caching is employed. We immediately downconvert to
1829 * no-lock at unlock time. This also means flock locks never go on
1830 * the blocking list.
1832 * - Since userspace can trivially deadlock itself with flock, we make
1833 * sure to allow cancellation of a misbehaving application's flock() calls.
1836 * - Access to any flock lockres doesn't require concurrency, so we
1837 * can simplify the code by requiring the caller to guarantee
1838 * serialization of dlmglue flock calls.
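 *
 * A rough sketch of the resulting call sequence (see ocfs2_file_lock() and
 * ocfs2_file_unlock() below):
 *
 *	flock(fd, LOCK_EX)   -> create the lockres at DLM_LOCK_NL if needed,
 *	                        then convert NL -> EX, sleeping on BUSY
 *	signal while waiting -> cancel the convert via
 *	                        ocfs2_flock_handle_signal()
 *	flock(fd, LOCK_UN)   -> fake a blocking AST and downconvert straight
 *	                        back to DLM_LOCK_NL
 */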
1840 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1842 int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1843 unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1844 unsigned long flags;
1845 struct ocfs2_file_private *fp = file->private_data;
1846 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1847 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1848 struct ocfs2_mask_waiter mw;
1850 ocfs2_init_mask_waiter(&mw);
1852 if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1853 (lockres->l_level > DLM_LOCK_NL)) {
1855 "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1856 "level: %u\n", lockres->l_name, lockres->l_flags,
1861 spin_lock_irqsave(&lockres->l_lock, flags);
1862 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1863 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1864 spin_unlock_irqrestore(&lockres->l_lock, flags);
1867 * Get the lock at NLMODE to start - that way we
1868 * can cancel the upconvert request if need be.
1870 ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1876 ret = ocfs2_wait_for_mask(&mw);
1881 spin_lock_irqsave(&lockres->l_lock, flags);
1884 lockres->l_action = OCFS2_AST_CONVERT;
1885 lkm_flags |= DLM_LKF_CONVERT;
1886 lockres->l_requested = level;
1887 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1889 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1890 spin_unlock_irqrestore(&lockres->l_lock, flags);
1892 ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
1893 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
1895 if (!trylock || (ret != -EAGAIN)) {
1896 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1900 ocfs2_recover_from_dlm_error(lockres, 1);
1901 lockres_remove_mask_waiter(lockres, &mw);
1905 ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
1906 if (ret == -ERESTARTSYS) {
1908 * Userspace can cause deadlock itself with
1909 * flock(). Current behavior locally is to allow the
1910 * deadlock, but abort the system call if a signal is
1911 * received. We follow this example, otherwise a
1912 * poorly written program could sit in kernel until we're umounted.
1915 * Handling this is a bit more complicated for Ocfs2
1916 * though. We can't exit this function with an
1917 * outstanding lock request, so a cancel convert is
1918 * required. We intentionally overwrite 'ret' - if the
1919 * cancel fails and the lock was granted, it's easier
1920 * to just bubble success back up to the user.
1922 ret = ocfs2_flock_handle_signal(lockres, level);
1923 } else if (!ret && (level > lockres->l_level)) {
1924 /* Trylock failed asynchronously */
1931 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
1932 lockres->l_name, ex, trylock, ret);
1936 void ocfs2_file_unlock(struct file *file)
1940 unsigned long flags;
1941 struct ocfs2_file_private *fp = file->private_data;
1942 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1943 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1944 struct ocfs2_mask_waiter mw;
1946 ocfs2_init_mask_waiter(&mw);
1948 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
1951 if (lockres->l_level == DLM_LOCK_NL)
1954 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
1955 lockres->l_name, lockres->l_flags, lockres->l_level,
1958 spin_lock_irqsave(&lockres->l_lock, flags);
1960 * Fake a blocking ast for the downconvert code.
1962 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
1963 lockres->l_blocking = DLM_LOCK_EX;
1965 gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
1966 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1967 spin_unlock_irqrestore(&lockres->l_lock, flags);
1969 ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
1975 ret = ocfs2_wait_for_mask(&mw);
1980 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
1981 struct ocfs2_lock_res *lockres)
1985 /* If we know that another node is waiting on our lock, kick
1986 * the downconvert thread pre-emptively when we reach a release condition. */
1988 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
1989 switch(lockres->l_blocking) {
1991 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
1995 if (!lockres->l_ex_holders)
2004 ocfs2_wake_downconvert_thread(osb);
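/*
 * For instance, dropping the last EX holder while another node is blocked
 * waiting for PR (l_blocking == DLM_LOCK_PR) kicks the downconvert thread
 * immediately instead of waiting for it to notice on its own.
 */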
2007 #define OCFS2_SEC_BITS 34
2008 #define OCFS2_SEC_SHIFT (64 - 34)
2009 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
2011 /* LVB only has room for 64 bits of time here so we pack it for now. */
2013 static u64 ocfs2_pack_timespec(struct timespec *spec)
2016 u64 sec = spec->tv_sec;
2017 u32 nsec = spec->tv_nsec;
2019 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
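/*
 * Packed layout: the seconds occupy the upper OCFS2_SEC_BITS (34) bits and
 * the nanoseconds, which always fit in 30 bits (10^9 < 2^30), occupy the low
 * OCFS2_SEC_SHIFT bits.
 */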
2024 /* Call this with the lockres locked. I am reasonably sure we don't
2025 * need ip_lock in this function as anyone who would be changing those
2026 * values is supposed to be blocked in ocfs2_inode_lock right now. */
2027 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2029 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2030 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2031 struct ocfs2_meta_lvb *lvb;
2033 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2035 /*
2036 * Invalidate the LVB of a deleted inode - this way other
2037 * nodes are forced to go to disk and discover the new inode
2038 * generation.
2039 */
2040 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2041 lvb->lvb_version = 0;
2045 lvb->lvb_version = OCFS2_LVB_VERSION;
2046 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
2047 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
2048 lvb->lvb_iuid = cpu_to_be32(i_uid_read(inode));
2049 lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
2050 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
2051 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
2052 lvb->lvb_iatime_packed =
2053 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
2054 lvb->lvb_ictime_packed =
2055 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
2056 lvb->lvb_imtime_packed =
2057 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
2058 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
2059 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
2060 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
2063 mlog_meta_lvb(0, lockres);
2066 static void ocfs2_unpack_timespec(struct timespec *spec,
2069 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
2070 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
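/*
 * A worked example of the packing above, with arbitrary values:
 * OCFS2_SEC_SHIFT is 30, so { .tv_sec = 5, .tv_nsec = 7 } packs to
 * (5ULL << 30) | 7 == 0x140000007.  Unpacking shifts the seconds back
 * down and masks off the low 30 bits for the nanoseconds; since
 * tv_nsec is always < 10^9 < 2^30 it survives the round trip, while
 * seconds are truncated to OCFS2_SEC_BITS (34) bits.
 */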
2073 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2075 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2076 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2077 struct ocfs2_meta_lvb *lvb;
2079 mlog_meta_lvb(0, lockres);
2081 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2083 /* We're safe here without the lockres lock... */
2084 spin_lock(&oi->ip_lock);
2085 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
2086 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
2088 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
2089 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
2090 ocfs2_set_inode_flags(inode);
2092 /* fast-symlinks are a special case */
2093 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
2094 inode->i_blocks = 0;
2096 inode->i_blocks = ocfs2_inode_sector_count(inode);
2098 i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
2099 i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
2100 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
2101 set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
2102 ocfs2_unpack_timespec(&inode->i_atime,
2103 be64_to_cpu(lvb->lvb_iatime_packed));
2104 ocfs2_unpack_timespec(&inode->i_mtime,
2105 be64_to_cpu(lvb->lvb_imtime_packed));
2106 ocfs2_unpack_timespec(&inode->i_ctime,
2107 be64_to_cpu(lvb->lvb_ictime_packed));
2108 spin_unlock(&oi->ip_lock);
2111 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2112 struct ocfs2_lock_res *lockres)
2114 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2116 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2117 && lvb->lvb_version == OCFS2_LVB_VERSION
2118 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
2123 /* Determine whether a lock resource needs to be refreshed, and
2124 * arbitrate who gets to refresh it.
2126 * 0 means no refresh needed.
2128 * > 0 means you need to refresh this and you MUST call
2129 * ocfs2_complete_lock_res_refresh afterwards. */
2130 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2132 unsigned long flags;
2136 spin_lock_irqsave(&lockres->l_lock, flags);
2137 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2138 spin_unlock_irqrestore(&lockres->l_lock, flags);
2142 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2143 spin_unlock_irqrestore(&lockres->l_lock, flags);
2145 ocfs2_wait_on_refreshing_lock(lockres);
2149 /* Ok, I'll be the one to refresh this lock. */
2150 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
2151 spin_unlock_irqrestore(&lockres->l_lock, flags);
2155 mlog(0, "status %d\n", status);
2159 /* If status is non-zero, I'll mark it as not being in refresh
2160 * anymore, but I won't clear the needs refresh flag. */
2161 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2164 unsigned long flags;
2166 spin_lock_irqsave(&lockres->l_lock, flags);
2167 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2169 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2170 spin_unlock_irqrestore(&lockres->l_lock, flags);
2172 wake_up(&lockres->l_event);
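/*
 * A rough sketch of how the two helpers above pair up in the callers
 * below (ocfs2_inode_lock_update(), ocfs2_super_lock(),
 * ocfs2_qinfo_lock()); illustrative only:
 *
 *	if (ocfs2_should_refresh_lock_res(lockres)) {
 *		status = ...re-read the protected state from the LVB or disk...;
 *		ocfs2_complete_lock_res_refresh(lockres, status);
 *	}
 *
 * Passing a non-zero status leaves OCFS2_LOCK_NEEDS_REFRESH set, so the
 * next locker will retry the refresh.
 */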
2175 /* may or may not return a bh if it went to disk. */
2176 static int ocfs2_inode_lock_update(struct inode *inode,
2177 struct buffer_head **bh)
2180 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2181 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2182 struct ocfs2_dinode *fe;
2183 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2185 if (ocfs2_mount_local(osb))
2188 spin_lock(&oi->ip_lock);
2189 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2190 mlog(0, "Orphaned inode %llu was deleted while we "
2191 "were waiting on a lock. ip_flags = 0x%x\n",
2192 (unsigned long long)oi->ip_blkno, oi->ip_flags);
2193 spin_unlock(&oi->ip_lock);
2197 spin_unlock(&oi->ip_lock);
2199 if (!ocfs2_should_refresh_lock_res(lockres))
2202 /* This will discard any caching information we might have had
2203 * for the inode metadata. */
2204 ocfs2_metadata_cache_purge(INODE_CACHE(inode));
2206 ocfs2_extent_map_trunc(inode, 0);
2208 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2209 mlog(0, "Trusting LVB on inode %llu\n",
2210 (unsigned long long)oi->ip_blkno);
2211 ocfs2_refresh_inode_from_lvb(inode);
2213 /* Boo, we have to go to disk. */
2214 /* read bh, cast, ocfs2_refresh_inode */
2215 status = ocfs2_read_inode_block(inode, bh);
2220 fe = (struct ocfs2_dinode *) (*bh)->b_data;
2222 /* This is a good chance to make sure we're not
2223 * locking an invalid object. ocfs2_read_inode_block()
2224 * already checked that the inode block is sane.
2226 * We bug on a stale inode here because we checked
2227 * above whether it was wiped from disk. The wiping
2228 * node provides a guarantee that we receive that
2229 * message and can mark the inode before dropping any
2230 * locks associated with it. */
2231 mlog_bug_on_msg(inode->i_generation !=
2232 le32_to_cpu(fe->i_generation),
2233 "Invalid dinode %llu disk generation: %u "
2234 "inode->i_generation: %u\n",
2235 (unsigned long long)oi->ip_blkno,
2236 le32_to_cpu(fe->i_generation),
2237 inode->i_generation);
2238 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
2239 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
2240 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
2241 (unsigned long long)oi->ip_blkno,
2242 (unsigned long long)le64_to_cpu(fe->i_dtime),
2243 le32_to_cpu(fe->i_flags));
2245 ocfs2_refresh_inode(inode, fe);
2246 ocfs2_track_lock_refresh(lockres);
2251 ocfs2_complete_lock_res_refresh(lockres, status);
2256 static int ocfs2_assign_bh(struct inode *inode,
2257 struct buffer_head **ret_bh,
2258 struct buffer_head *passed_bh)
2263 /* Ok, the update went to disk for us, use the returned bh. */
2265 *ret_bh = passed_bh;
2271 status = ocfs2_read_inode_block(inode, ret_bh);
2279 * returns < 0 error if the callback will never be called, otherwise
2280 * the result of the lock will be communicated via the callback.
2282 int ocfs2_inode_lock_full_nested(struct inode *inode,
2283 struct buffer_head **ret_bh,
2288 int status, level, acquired;
2290 struct ocfs2_lock_res *lockres = NULL;
2291 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2292 struct buffer_head *local_bh = NULL;
2296 mlog(0, "inode %llu, take %s META lock\n",
2297 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2298 ex ? "EXMODE" : "PRMODE");
2302 /* We'll allow faking a readonly metadata lock for
2304 if (ocfs2_is_hard_readonly(osb)) {
2310 if (ocfs2_mount_local(osb))
2313 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2314 ocfs2_wait_for_recovery(osb);
2316 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2317 level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2319 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2320 dlm_flags |= DLM_LKF_NOQUEUE;
2322 status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2323 arg_flags, subclass, _RET_IP_);
2325 if (status != -EAGAIN)
2330 /* Notify the error cleanup path to drop the cluster lock. */
2333 /* We wait twice because a node may have died while we were in
2334 * the lower dlm layers. The second time though, we've
2335 * committed to owning this lock so we don't allow signals to
2336 * abort the operation. */
2337 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2338 ocfs2_wait_for_recovery(osb);
2342 * We only see this flag if we're being called from
2343 * ocfs2_read_locked_inode(). It means we're locking an inode
2344 * which hasn't been populated yet, so clear the refresh flag
2345 * and let the caller handle it.
2347 if (inode->i_state & I_NEW) {
2350 ocfs2_complete_lock_res_refresh(lockres, 0);
2354 /* This is fun. The caller may want a bh back, or it may
2355 * not. ocfs2_inode_lock_update definitely wants one in, but
2356 * may or may not read one, depending on what's in the
2357 * LVB. The result of all of this is that we've *only* gone to
2358 * disk if we have to, so the complexity is worthwhile. */
2359 status = ocfs2_inode_lock_update(inode, &local_bh);
2361 if (status != -ENOENT)
2367 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2376 if (ret_bh && (*ret_bh)) {
2381 ocfs2_inode_unlock(inode, ex);
2391 * This is working around a lock inversion between tasks acquiring DLM
2392 * locks while holding a page lock and the downconvert thread which
2393 * blocks dlm lock acquiry while acquiring page locks.
2395 * ** These _with_page variants are only intended to be called from aop
2396 * methods that hold page locks and return a very specific *positive* error
2397 * code that aop methods pass up to the VFS -- test for errors with != 0. **
2399 * The DLM is called such that it returns -EAGAIN if it would have
2400 * blocked waiting for the downconvert thread. In that case we unlock
2401 * our page so the downconvert thread can make progress. Once we've
2402 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2403 * that called us can bubble that back up into the VFS who will then
2404 * immediately retry the aop call.
2406 * We do a blocking lock and immediate unlock before returning, though, so that
2407 * the lock has a great chance of being cached on this node by the time the VFS
2408 * calls back to retry the aop. This has a potential to livelock as nodes
2409 * ping locks back and forth, but that's a risk we're willing to take to avoid
2410 * the lock inversion simply.
2412 int ocfs2_inode_lock_with_page(struct inode *inode,
2413 struct buffer_head **ret_bh,
2419 ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2420 if (ret == -EAGAIN) {
2422 if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2423 ocfs2_inode_unlock(inode, ex);
2424 ret = AOP_TRUNCATED_PAGE;
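/*
 * Illustrative only -- a sketch of the expected calling pattern from an
 * aop method (the real callers live in aops.c), assuming 'inode' and a
 * locked 'page' are in scope.  On AOP_TRUNCATED_PAGE the page has
 * already been unlocked for us, so the code is simply handed back to
 * the VFS, which retries the aop call:
 *
 *	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
 *	if (ret != 0)
 *		return ret;
 *	... do the work under the cluster lock ...
 *	ocfs2_inode_unlock(inode, 0);
 */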
2430 int ocfs2_inode_lock_atime(struct inode *inode,
2431 struct vfsmount *vfsmnt,
2436 ret = ocfs2_inode_lock(inode, NULL, 0);
2443 * If we should update atime, we will get EX lock,
2444 * otherwise we just get PR lock.
2446 if (ocfs2_should_update_atime(inode, vfsmnt)) {
2447 struct buffer_head *bh = NULL;
2449 ocfs2_inode_unlock(inode, 0);
2450 ret = ocfs2_inode_lock(inode, &bh, 1);
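/* Re-check after re-acquiring the lock exclusively -- another node may
 * have updated the atime while we had the inode unlocked above. */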
2456 if (ocfs2_should_update_atime(inode, vfsmnt))
2457 ocfs2_update_inode_atime(inode, bh);
2466 void ocfs2_inode_unlock(struct inode *inode,
2469 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2470 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2471 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2473 mlog(0, "inode %llu drop %s META lock\n",
2474 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2475 ex ? "EXMODE" : "PRMODE");
2477 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
2478 !ocfs2_mount_local(osb))
2479 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
2482 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2484 struct ocfs2_lock_res *lockres;
2485 struct ocfs2_orphan_scan_lvb *lvb;
2488 if (ocfs2_is_hard_readonly(osb))
2491 if (ocfs2_mount_local(osb))
2494 lockres = &osb->osb_orphan_scan.os_lockres;
2495 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2499 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2500 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2501 lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2502 *seqno = be32_to_cpu(lvb->lvb_os_seqno);
2504 *seqno = osb->osb_orphan_scan.os_seqno + 1;
2509 void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2511 struct ocfs2_lock_res *lockres;
2512 struct ocfs2_orphan_scan_lvb *lvb;
2514 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2515 lockres = &osb->osb_orphan_scan.os_lockres;
2516 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2517 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2518 lvb->lvb_os_seqno = cpu_to_be32(seqno);
2519 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2523 int ocfs2_super_lock(struct ocfs2_super *osb,
2527 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2528 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2530 if (ocfs2_is_hard_readonly(osb))
2533 if (ocfs2_mount_local(osb))
2536 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2542 /* The super block lock path is really in the best position to
2543 * know when resources covered by the lock need to be
2544 * refreshed, so we do it here. Of course, making sense of
2545 * everything is up to the caller :) */
2546 status = ocfs2_should_refresh_lock_res(lockres);
2548 ocfs2_cluster_unlock(osb, lockres, level);
2553 status = ocfs2_refresh_slot_info(osb);
2555 ocfs2_complete_lock_res_refresh(lockres, status);
2558 ocfs2_cluster_unlock(osb, lockres, level);
2561 ocfs2_track_lock_refresh(lockres);
2567 void ocfs2_super_unlock(struct ocfs2_super *osb,
2570 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2571 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2573 if (!ocfs2_mount_local(osb))
2574 ocfs2_cluster_unlock(osb, lockres, level);
2577 int ocfs2_rename_lock(struct ocfs2_super *osb)
2580 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2582 if (ocfs2_is_hard_readonly(osb))
2585 if (ocfs2_mount_local(osb))
2588 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2595 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2597 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2599 if (!ocfs2_mount_local(osb))
2600 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2603 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
2606 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2608 if (ocfs2_is_hard_readonly(osb))
2611 if (ocfs2_mount_local(osb))
2614 status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
2617 mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2622 void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
2624 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2626 if (!ocfs2_mount_local(osb))
2627 ocfs2_cluster_unlock(osb, lockres,
2628 ex ? LKM_EXMODE : LKM_PRMODE);
2631 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2634 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2635 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2636 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2640 if (ocfs2_is_hard_readonly(osb)) {
2646 if (ocfs2_mount_local(osb))
2649 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2656 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2658 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2659 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2660 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2662 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2663 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2666 /* Reference counting of the dlm debug structure. We want this because
2667 * open references on the debug inodes can live on after a mount, so
2668 * we can't rely on the ocfs2_super to always exist. */
2669 static void ocfs2_dlm_debug_free(struct kref *kref)
2671 struct ocfs2_dlm_debug *dlm_debug;
2673 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2678 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2681 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2684 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2686 kref_get(&debug->d_refcnt);
2689 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2691 struct ocfs2_dlm_debug *dlm_debug;
2693 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2695 mlog_errno(-ENOMEM);
2699 kref_init(&dlm_debug->d_refcnt);
2700 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2701 dlm_debug->d_locking_state = NULL;
2706 /* Access to this is arbitrated for us via seq_file->lock. */
2707 struct ocfs2_dlm_seq_priv {
2708 struct ocfs2_dlm_debug *p_dlm_debug;
2709 struct ocfs2_lock_res p_iter_res;
2710 struct ocfs2_lock_res p_tmp_res;
2713 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2714 struct ocfs2_dlm_seq_priv *priv)
2716 struct ocfs2_lock_res *iter, *ret = NULL;
2717 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2719 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2721 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2722 /* discover the head of the list */
2723 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2724 mlog(0, "End of list found, %p\n", ret);
2728 /* We track our "dummy" iteration lockres' by a NULL l_ops field. */
2730 if (iter->l_ops != NULL) {
2739 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2741 struct ocfs2_dlm_seq_priv *priv = m->private;
2742 struct ocfs2_lock_res *iter;
2744 spin_lock(&ocfs2_dlm_tracking_lock);
2745 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2747 /* Since lockres' have the lifetime of their container
2748 * (which can be inodes, ocfs2_supers, etc) we want to
2749 * copy this out to a temporary lockres while still
2750 * under the spinlock. Obviously after this we can't
2751 * trust any pointers on the copy returned, but that's
2752 * ok as the information we want isn't typically held
2754 priv->p_tmp_res = *iter;
2755 iter = &priv->p_tmp_res;
2757 spin_unlock(&ocfs2_dlm_tracking_lock);
2762 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2766 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2768 struct ocfs2_dlm_seq_priv *priv = m->private;
2769 struct ocfs2_lock_res *iter = v;
2770 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2772 spin_lock(&ocfs2_dlm_tracking_lock);
2773 iter = ocfs2_dlm_next_res(iter, priv);
2774 list_del_init(&dummy->l_debug_list);
2776 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2777 priv->p_tmp_res = *iter;
2778 iter = &priv->p_tmp_res;
2780 spin_unlock(&ocfs2_dlm_tracking_lock);
2786 * Version is used by debugfs.ocfs2 to determine the format being used
2789 * - Lock stats printed
2791 * - Max time in lock stats is in usecs (instead of nsecs)
2793 #define OCFS2_DLM_DEBUG_STR_VERSION 3
2794 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2798 struct ocfs2_lock_res *lockres = v;
2803 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2805 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2806 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2808 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2810 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2812 seq_printf(m, "%d\t"
2823 lockres->l_unlock_action,
2824 lockres->l_ro_holders,
2825 lockres->l_ex_holders,
2826 lockres->l_requested,
2827 lockres->l_blocking);
2829 /* Dump the raw LVB */
2830 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2831 for(i = 0; i < DLM_LVB_LEN; i++)
2832 seq_printf(m, "0x%x\t", lvb[i]);
2834 #ifdef CONFIG_OCFS2_FS_STATS
2835 # define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
2836 # define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
2837 # define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
2838 # define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
2839 # define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
2840 # define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
2841 # define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
2842 # define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
2843 # define lock_refresh(_l) ((_l)->l_lock_refresh)
2845 # define lock_num_prmode(_l) (0)
2846 # define lock_num_exmode(_l) (0)
2847 # define lock_num_prmode_failed(_l) (0)
2848 # define lock_num_exmode_failed(_l) (0)
2849 # define lock_total_prmode(_l) (0ULL)
2850 # define lock_total_exmode(_l) (0ULL)
2851 # define lock_max_prmode(_l) (0)
2852 # define lock_max_exmode(_l) (0)
2853 # define lock_refresh(_l) (0)
2855 /* The following seq_print was added in version 2 of this output */
2856 seq_printf(m, "%u\t"
2865 lock_num_prmode(lockres),
2866 lock_num_exmode(lockres),
2867 lock_num_prmode_failed(lockres),
2868 lock_num_exmode_failed(lockres),
2869 lock_total_prmode(lockres),
2870 lock_total_exmode(lockres),
2871 lock_max_prmode(lockres),
2872 lock_max_exmode(lockres),
2873 lock_refresh(lockres));
2876 seq_printf(m, "\n");
2880 static const struct seq_operations ocfs2_dlm_seq_ops = {
2881 .start = ocfs2_dlm_seq_start,
2882 .stop = ocfs2_dlm_seq_stop,
2883 .next = ocfs2_dlm_seq_next,
2884 .show = ocfs2_dlm_seq_show,
2887 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2889 struct seq_file *seq = file->private_data;
2890 struct ocfs2_dlm_seq_priv *priv = seq->private;
2891 struct ocfs2_lock_res *res = &priv->p_iter_res;
2893 ocfs2_remove_lockres_tracking(res);
2894 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2895 return seq_release_private(inode, file);
2898 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2901 struct ocfs2_dlm_seq_priv *priv;
2902 struct seq_file *seq;
2903 struct ocfs2_super *osb;
2905 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2911 osb = inode->i_private;
2912 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2913 priv->p_dlm_debug = osb->osb_dlm_debug;
2914 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2916 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2923 seq = file->private_data;
2924 seq->private = priv;
2926 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2933 static const struct file_operations ocfs2_dlm_debug_fops = {
2934 .open = ocfs2_dlm_debug_open,
2935 .release = ocfs2_dlm_debug_release,
2937 .llseek = seq_lseek,
2940 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2943 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2945 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2947 osb->osb_debug_root,
2949 &ocfs2_dlm_debug_fops);
2950 if (!dlm_debug->d_locking_state) {
2953 "Unable to create locking state debugfs file.\n");
2957 ocfs2_get_dlm_debug(dlm_debug);
2962 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2964 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2967 debugfs_remove(dlm_debug->d_locking_state);
2968 ocfs2_put_dlm_debug(dlm_debug);
2972 int ocfs2_dlm_init(struct ocfs2_super *osb)
2975 struct ocfs2_cluster_connection *conn = NULL;
2977 if (ocfs2_mount_local(osb)) {
2982 status = ocfs2_dlm_init_debug(osb);
2988 /* launch downconvert thread */
2989 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
2990 if (IS_ERR(osb->dc_task)) {
2991 status = PTR_ERR(osb->dc_task);
2992 osb->dc_task = NULL;
2997 /* for now, uuid == domain */
2998 status = ocfs2_cluster_connect(osb->osb_cluster_stack,
3000 strlen(osb->uuid_str),
3001 &lproto, ocfs2_do_node_down, osb,
3008 status = ocfs2_cluster_this_node(&osb->node_num);
3012 "could not find this host's node number\n");
3013 ocfs2_cluster_disconnect(conn, 0);
3018 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
3019 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
3020 ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
3021 ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
3028 ocfs2_dlm_shutdown_debug(osb);
3030 kthread_stop(osb->dc_task);
3036 void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3039 ocfs2_drop_osb_locks(osb);
3042 * Now that we have dropped all locks and ocfs2_dismount_volume()
3043 * has disabled recovery, the DLM won't be talking to us. It's
3044 * safe to tear things down before disconnecting the cluster.
3048 kthread_stop(osb->dc_task);
3049 osb->dc_task = NULL;
3052 ocfs2_lock_res_free(&osb->osb_super_lockres);
3053 ocfs2_lock_res_free(&osb->osb_rename_lockres);
3054 ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
3055 ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
3057 ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
3060 ocfs2_dlm_shutdown_debug(osb);
3063 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3064 struct ocfs2_lock_res *lockres)
3067 unsigned long flags;
3070 /* We didn't get anywhere near actually using this lockres. */
3071 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
3074 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3075 lkm_flags |= DLM_LKF_VALBLK;
3077 spin_lock_irqsave(&lockres->l_lock, flags);
3079 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
3080 "lockres %s, flags 0x%lx\n",
3081 lockres->l_name, lockres->l_flags);
3083 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
3084 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3085 "%u, unlock_action = %u\n",
3086 lockres->l_name, lockres->l_flags, lockres->l_action,
3087 lockres->l_unlock_action);
3089 spin_unlock_irqrestore(&lockres->l_lock, flags);
3091 /* XXX: Today we just wait on any busy
3092 * locks... Perhaps we need to cancel converts in the future? */
3094 ocfs2_wait_on_busy_lock(lockres);
3096 spin_lock_irqsave(&lockres->l_lock, flags);
3099 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3100 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
3101 lockres->l_level == DLM_LOCK_EX &&
3102 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3103 lockres->l_ops->set_lvb(lockres);
3106 if (lockres->l_flags & OCFS2_LOCK_BUSY)
3107 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
3109 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3110 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3112 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
3113 spin_unlock_irqrestore(&lockres->l_lock, flags);
3117 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
3119 /* make sure we never get here while waiting for an ast to
3121 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
3123 /* is this necessary? */
3124 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3125 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
3126 spin_unlock_irqrestore(&lockres->l_lock, flags);
3128 mlog(0, "lock %s\n", lockres->l_name);
3130 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
3132 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3133 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
3134 ocfs2_dlm_dump_lksb(&lockres->l_lksb);
3137 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3140 ocfs2_wait_on_busy_lock(lockres);
3145 /* Mark the lockres as being dropped. It will no longer be
3146 * queued if blocking, but we still may have to wait on it
3147 * being dequeued from the downconvert thread before we can consider
3150 * You can *not* attempt to call cluster_lock on this lockres anymore. */
3151 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
3154 struct ocfs2_mask_waiter mw;
3155 unsigned long flags;
3157 ocfs2_init_mask_waiter(&mw);
3159 spin_lock_irqsave(&lockres->l_lock, flags);
3160 lockres->l_flags |= OCFS2_LOCK_FREEING;
3161 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
3162 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
3163 spin_unlock_irqrestore(&lockres->l_lock, flags);
3165 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3167 status = ocfs2_wait_for_mask(&mw);
3171 spin_lock_irqsave(&lockres->l_lock, flags);
3173 spin_unlock_irqrestore(&lockres->l_lock, flags);
3176 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
3177 struct ocfs2_lock_res *lockres)
3181 ocfs2_mark_lockres_freeing(lockres);
3182 ret = ocfs2_drop_lock(osb, lockres);
3187 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3189 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
3190 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
3191 ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
3192 ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
3195 int ocfs2_drop_inode_locks(struct inode *inode)
3199 /* No need to call ocfs2_mark_lockres_freeing here -
3200 * ocfs2_clear_inode has done it for us. */
3202 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3203 &OCFS2_I(inode)->ip_open_lockres);
3209 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3210 &OCFS2_I(inode)->ip_inode_lockres);
3213 if (err < 0 && !status)
3216 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3217 &OCFS2_I(inode)->ip_rw_lockres);
3220 if (err < 0 && !status)
3226 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3229 assert_spin_locked(&lockres->l_lock);
3231 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3233 if (lockres->l_level <= new_level) {
3234 mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
3235 "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
3236 "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
3237 new_level, list_empty(&lockres->l_blocked_list),
3238 list_empty(&lockres->l_mask_waiters), lockres->l_type,
3239 lockres->l_flags, lockres->l_ro_holders,
3240 lockres->l_ex_holders, lockres->l_action,
3241 lockres->l_unlock_action, lockres->l_requested,
3242 lockres->l_blocking, lockres->l_pending_gen);
3246 mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
3247 lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
3249 lockres->l_action = OCFS2_AST_DOWNCONVERT;
3250 lockres->l_requested = new_level;
3251 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3252 return lockres_set_pending(lockres);
3255 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3256 struct ocfs2_lock_res *lockres,
3259 unsigned int generation)
3262 u32 dlm_flags = DLM_LKF_CONVERT;
3264 mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
3265 lockres->l_level, new_level);
3268 dlm_flags |= DLM_LKF_VALBLK;
3270 ret = ocfs2_dlm_lock(osb->cconn,
3275 OCFS2_LOCK_ID_MAX_LEN - 1);
3276 lockres_clear_pending(lockres, generation, osb);
3278 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
3279 ocfs2_recover_from_dlm_error(lockres, 1);
3288 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
3289 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3290 struct ocfs2_lock_res *lockres)
3292 assert_spin_locked(&lockres->l_lock);
3294 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3295 /* If we're already trying to cancel a lock conversion
3296 * then just drop the spinlock and allow the caller to
3297 * requeue this lock. */
3298 mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
3302 /* were we in a convert when we got the bast fire? */
3303 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
3304 lockres->l_action != OCFS2_AST_DOWNCONVERT);
3305 /* set things up for the unlockast to know to just
3306 * clear out the ast_action and unset busy, etc. */
3307 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3309 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3310 "lock %s, invalid flags: 0x%lx\n",
3311 lockres->l_name, lockres->l_flags);
3313 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3318 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3319 struct ocfs2_lock_res *lockres)
3323 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3326 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3327 ocfs2_recover_from_dlm_error(lockres, 0);
3330 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3335 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3336 struct ocfs2_lock_res *lockres,
3337 struct ocfs2_unblock_ctl *ctl)
3339 unsigned long flags;
3347 spin_lock_irqsave(&lockres->l_lock, flags);
3351 * Is it still blocking? If not, we have no more work to do.
3353 if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
3354 BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
3355 spin_unlock_irqrestore(&lockres->l_lock, flags);
3360 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3362 * This is a *big* race. The OCFS2_LOCK_PENDING flag
3363 * exists entirely for one reason - another thread has set
3364 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3366 * If we do ocfs2_cancel_convert() before the other thread
3367 * calls dlm_lock(), our cancel will do nothing. We will
3368 * get no ast, and we will have no way of knowing the
3369 * cancel failed. Meanwhile, the other thread will call
3370 * into dlm_lock() and wait...forever.
3372 * Why forever? Because another node has asked for the
3373 * lock first; that's why we're here in unblock_lock().
3375 * The solution is OCFS2_LOCK_PENDING. When PENDING is
3376 * set, we just requeue the unblock. Only when the other
3377 * thread has called dlm_lock() and cleared PENDING will
3378 * we then cancel their request.
3380 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
3381 * at the same time they set OCFS2_LOCK_BUSY. They must
3382 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
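 *
 * For illustration, one interleaving this protects against:
 *
 *	upconverting thread		downconvert thread
 *	-------------------		------------------
 *	set BUSY and PENDING
 *					sees BUSY and PENDING set,
 *					requeues instead of cancelling
 *	calls dlm_lock()
 *	clears PENDING
 *					sees BUSY with PENDING clear,
 *					cancel convert is now safe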
3384 if (lockres->l_flags & OCFS2_LOCK_PENDING) {
3385 mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
3391 ret = ocfs2_prepare_cancel_convert(osb, lockres);
3392 spin_unlock_irqrestore(&lockres->l_lock, flags);
3394 ret = ocfs2_cancel_convert(osb, lockres);
3402 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
3403 * set when the ast is received for an upconvert just before the
3404 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
3405 * on the heels of the ast, we want to delay the downconvert just
3406 * enough to allow the up requestor to do its task. Because this
3407 * lock is in the blocked queue, the lock will be downconverted
3408 * as soon as the requestor is done with the lock.
3410 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
3414 * How can we block and yet be at NL? We were trying to upconvert
3415 * from NL and got canceled. The code comes back here, and now
3416 * we notice and clear BLOCKING.
3418 if (lockres->l_level == DLM_LOCK_NL) {
3419 BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3420 mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
3421 lockres->l_blocking = DLM_LOCK_NL;
3422 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3423 spin_unlock_irqrestore(&lockres->l_lock, flags);
3427 /* if we're blocking an exclusive and we have *any* holders,
3429 if ((lockres->l_blocking == DLM_LOCK_EX)
3430 && (lockres->l_ex_holders || lockres->l_ro_holders)) {
3431 mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
3432 lockres->l_name, lockres->l_ex_holders,
3433 lockres->l_ro_holders);
3437 /* If it's a PR we're blocking, then only
3438 * requeue if we've got any EX holders */
3439 if (lockres->l_blocking == DLM_LOCK_PR &&
3440 lockres->l_ex_holders) {
3441 mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
3442 lockres->l_name, lockres->l_ex_holders);
3447 * Can we get a lock in this state if the holder counts are
3448 * zero? The meta data unblock code used to check this.
3450 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3451 && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
3452 mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
3457 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3459 if (lockres->l_ops->check_downconvert
3460 && !lockres->l_ops->check_downconvert(lockres, new_level)) {
3461 mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
3466 /* If we get here, then we know that there are no more
3467 * incompatible holders (and anyone asking for an incompatible
3468 * lock is blocked). We can now downconvert the lock */
3469 if (!lockres->l_ops->downconvert_worker)
3472 /* Some lockres types want to do a bit of work before
3473 * downconverting a lock. Allow that here. The worker function
3474 * may sleep, so we save off a copy of what we're blocking as
3475 * it may change while we're not holding the spin lock. */
3476 blocking = lockres->l_blocking;
3477 level = lockres->l_level;
3478 spin_unlock_irqrestore(&lockres->l_lock, flags);
3480 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3482 if (ctl->unblock_action == UNBLOCK_STOP_POST) {
3483 mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
3488 spin_lock_irqsave(&lockres->l_lock, flags);
3489 if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3490 /* If this changed underneath us, then we can't drop
3492 mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
3493 "Recheck\n", lockres->l_name, blocking,
3494 lockres->l_blocking, level, lockres->l_level);
3501 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3502 if (lockres->l_level == DLM_LOCK_EX)
3506 * We only set the lvb if the lock has been fully
3507 * refreshed - otherwise we risk setting stale
3508 * data. If it hasn't been refreshed, there's no need to
3509 * clear out the lvb here, as its value is still valid.
3511 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3512 lockres->l_ops->set_lvb(lockres);
3515 gen = ocfs2_prepare_downconvert(lockres, new_level);
3516 spin_unlock_irqrestore(&lockres->l_lock, flags);
3517 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3526 spin_unlock_irqrestore(&lockres->l_lock, flags);
3532 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3535 struct inode *inode;
3536 struct address_space *mapping;
3537 struct ocfs2_inode_info *oi;
3539 inode = ocfs2_lock_res_inode(lockres);
3540 mapping = inode->i_mapping;
3542 if (S_ISDIR(inode->i_mode)) {
3543 oi = OCFS2_I(inode);
3544 oi->ip_dir_lock_gen++;
3545 mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
3549 if (!S_ISREG(inode->i_mode))
3553 * We need this before the filemap_fdatawrite() so that it can
3554 * transfer the dirty bit from the PTE to the
3555 * page. Unfortunately this means that even for EX->PR
3556 * downconverts, we'll lose our mappings and have to build
3559 unmap_mapping_range(mapping, 0, 0, 0);
3561 if (filemap_fdatawrite(mapping)) {
3562 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3563 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3565 sync_mapping_buffers(mapping);
3566 if (blocking == DLM_LOCK_EX) {
3567 truncate_inode_pages(mapping, 0);
3569 /* We only need to wait on the I/O if we're not also
3570 * truncating pages because truncate_inode_pages waits
3571 * for us above. We don't truncate pages if we're
3572 * blocking anything < EXMODE because we want to keep
3573 * them around in that case. */
3574 filemap_fdatawait(mapping);
3578 return UNBLOCK_CONTINUE;
3581 static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
3582 struct ocfs2_lock_res *lockres,
3585 int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3587 BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3588 BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3593 ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
3597 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3600 struct inode *inode = ocfs2_lock_res_inode(lockres);
3602 return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
3605 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3607 struct inode *inode = ocfs2_lock_res_inode(lockres);
3609 __ocfs2_stuff_meta_lvb(inode);
3613 * Does the final reference drop on our dentry lock. Right now this
3614 * happens in the downconvert thread, but we could choose to simplify the
3615 * dlmglue API and push these off to the ocfs2_wq in the future.
3617 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3618 struct ocfs2_lock_res *lockres)
3620 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3621 ocfs2_dentry_lock_put(osb, dl);
3625 * d_delete() matching dentries before the lock downconvert.
3627 * At this point, any process waiting to destroy the
3628 * dentry_lock due to last ref count is stopped by the
3629 * OCFS2_LOCK_QUEUED flag.
3631 * We have two potential problems
3633 * 1) If we do the last reference drop on our dentry_lock (via dput)
3634 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
3635 * the downconvert to finish. Instead we take an elevated
3636 * reference and push the drop until after we've completed our
3637 * unblock processing.
3639 * 2) There might be another process with a final reference,
3640 * waiting on us to finish processing. If this is the case, we
3641 * detect it and exit out - there are no more dentries anyway.
3643 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3646 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3647 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3648 struct dentry *dentry;
3649 unsigned long flags;
3653 * This node is blocking another node from getting a read
3654 * lock. This happens when we've renamed within a
3655 * directory. We've forced the other nodes to d_delete(), but
3656 * we never actually dropped our lock because it's still
3657 * valid. The downconvert code will retain a PR for this node,
3658 * so there's no further work to do.
3660 if (blocking == DLM_LOCK_PR)
3661 return UNBLOCK_CONTINUE;
3664 * Mark this inode as potentially orphaned. The code in
3665 * ocfs2_delete_inode() will figure out whether it actually
3666 * needs to be freed or not.
3668 spin_lock(&oi->ip_lock);
3669 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3670 spin_unlock(&oi->ip_lock);
3673 * Yuck. We need to make sure however that the check of
3674 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3675 * respect to a reference decrement or the setting of that
3678 spin_lock_irqsave(&lockres->l_lock, flags);
3679 spin_lock(&dentry_attach_lock);
3680 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
3685 spin_unlock(&dentry_attach_lock);
3686 spin_unlock_irqrestore(&lockres->l_lock, flags);
3688 mlog(0, "extra_ref = %d\n", extra_ref);
3691 * We have a process waiting on us in ocfs2_dentry_iput(),
3692 * which means we can't have any more outstanding
3693 * aliases. There's no need to do any more work.
3696 return UNBLOCK_CONTINUE;
3698 spin_lock(&dentry_attach_lock);
3700 dentry = ocfs2_find_local_alias(dl->dl_inode,
3701 dl->dl_parent_blkno, 1);
3704 spin_unlock(&dentry_attach_lock);
3706 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3707 dentry->d_name.name);
3710 * The following dcache calls may do an
3711 * iput(). Normally we don't want that from the
3712 * downconverting thread, but in this case it's ok
3713 * because the requesting node already has an
3714 * exclusive lock on the inode, so it can't be queued
3715 * for a downconvert.
3720 spin_lock(&dentry_attach_lock);
3722 spin_unlock(&dentry_attach_lock);
3725 * If we are the last holder of this dentry lock, there is no
3726 * reason to downconvert so skip straight to the unlock.
3728 if (dl->dl_count == 1)
3729 return UNBLOCK_STOP_POST;
3731 return UNBLOCK_CONTINUE_POST;
3734 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
3737 struct ocfs2_refcount_tree *tree =
3738 ocfs2_lock_res_refcount_tree(lockres);
3740 return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
3743 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
3746 struct ocfs2_refcount_tree *tree =
3747 ocfs2_lock_res_refcount_tree(lockres);
3749 ocfs2_metadata_cache_purge(&tree->rf_ci);
3751 return UNBLOCK_CONTINUE;
3754 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
3756 struct ocfs2_qinfo_lvb *lvb;
3757 struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
3758 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3759 oinfo->dqi_gi.dqi_type);
3761 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3762 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
3763 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
3764 lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
3765 lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
3766 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
3767 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
3768 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
3771 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3773 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3774 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3775 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3777 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3778 ocfs2_cluster_unlock(osb, lockres, level);
3781 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3783 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3784 oinfo->dqi_gi.dqi_type);
3785 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3786 struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3787 struct buffer_head *bh = NULL;
3788 struct ocfs2_global_disk_dqinfo *gdinfo;
3791 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
3792 lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
3793 info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
3794 info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
3795 oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
3796 oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
3797 oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
3798 oinfo->dqi_gi.dqi_free_entry =
3799 be32_to_cpu(lvb->lvb_free_entry);
3801 status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
3802 oinfo->dqi_giblk, &bh);
3807 gdinfo = (struct ocfs2_global_disk_dqinfo *)
3808 (bh->b_data + OCFS2_GLOBAL_INFO_OFF);
3809 info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
3810 info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
3811 oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
3812 oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
3813 oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
3814 oinfo->dqi_gi.dqi_free_entry =
3815 le32_to_cpu(gdinfo->dqi_free_entry);
3817 ocfs2_track_lock_refresh(lockres);
3824 /* Lock quota info; this function expects at least a shared lock on the quota file
3825 * so that we can safely refresh quota info from disk. */
3826 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3828 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3829 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3830 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3833 /* On RO devices, locking really isn't needed... */
3834 if (ocfs2_is_hard_readonly(osb)) {
3839 if (ocfs2_mount_local(osb))
3842 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3847 if (!ocfs2_should_refresh_lock_res(lockres))
3849 /* OK, we have the lock but we need to refresh the quota info */
3850 status = ocfs2_refresh_qinfo(oinfo);
3852 ocfs2_qinfo_unlock(oinfo, ex);
3853 ocfs2_complete_lock_res_refresh(lockres, status);
3858 int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
3861 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3862 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3863 struct ocfs2_super *osb = lockres->l_priv;
3866 if (ocfs2_is_hard_readonly(osb))
3869 if (ocfs2_mount_local(osb))
3872 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3879 void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
3881 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3882 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3883 struct ocfs2_super *osb = lockres->l_priv;
3885 if (!ocfs2_mount_local(osb))
3886 ocfs2_cluster_unlock(osb, lockres, level);
3889 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3890 struct ocfs2_lock_res *lockres)
3893 struct ocfs2_unblock_ctl ctl = {0, 0,};
3894 unsigned long flags;
3896 /* Our reference to the lockres in this function can be
3897 * considered valid until we remove the OCFS2_LOCK_QUEUED
3901 BUG_ON(!lockres->l_ops);
3903 mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
3905 /* Detect whether a lock has been marked as going away while
3906 * the downconvert thread was processing other things. A lock can
3907 * still be marked with OCFS2_LOCK_FREEING after this check,
3908 * but short circuiting here will still save us some
3910 spin_lock_irqsave(&lockres->l_lock, flags);
3911 if (lockres->l_flags & OCFS2_LOCK_FREEING)
3913 spin_unlock_irqrestore(&lockres->l_lock, flags);
3915 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3919 spin_lock_irqsave(&lockres->l_lock, flags);
3921 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
3922 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3924 ocfs2_schedule_blocked_lock(osb, lockres);
3926 mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
3927 ctl.requeue ? "yes" : "no");
3928 spin_unlock_irqrestore(&lockres->l_lock, flags);
3930 if (ctl.unblock_action != UNBLOCK_CONTINUE
3931 && lockres->l_ops->post_unlock)
3932 lockres->l_ops->post_unlock(osb, lockres);
3935 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3936 struct ocfs2_lock_res *lockres)
3938 unsigned long flags;
3940 assert_spin_locked(&lockres->l_lock);
3942 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3943 /* Do not schedule a lock for downconvert when it's on
3944 * the way to destruction - any nodes wanting access
3945 * to the resource will get it soon. */
3946 mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
3947 lockres->l_name, lockres->l_flags);
3951 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3953 spin_lock_irqsave(&osb->dc_task_lock, flags);
3954 if (list_empty(&lockres->l_blocked_list)) {
3955 list_add_tail(&lockres->l_blocked_list,
3956 &osb->blocked_lock_list);
3957 osb->blocked_lock_count++;
3959 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3962 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3964 unsigned long processed;
3965 unsigned long flags;
3966 struct ocfs2_lock_res *lockres;
3968 spin_lock_irqsave(&osb->dc_task_lock, flags);
3969 /* grab this early so we know to try again if a state change and
3970 * wake-up happen part-way through our work */
3971 osb->dc_work_sequence = osb->dc_wake_sequence;
3973 processed = osb->blocked_lock_count;
3975 BUG_ON(list_empty(&osb->blocked_lock_list));
3977 lockres = list_entry(osb->blocked_lock_list.next,
3978 struct ocfs2_lock_res, l_blocked_list);
3979 list_del_init(&lockres->l_blocked_list);
3980 osb->blocked_lock_count--;
3981 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3986 ocfs2_process_blocked_lock(osb, lockres);
3988 spin_lock_irqsave(&osb->dc_task_lock, flags);
3990 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
3993 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3996 unsigned long flags;
3998 spin_lock_irqsave(&osb->dc_task_lock, flags);
3999 if (list_empty(&osb->blocked_lock_list))
4002 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4006 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4008 int should_wake = 0;
4009 unsigned long flags;
4011 spin_lock_irqsave(&osb->dc_task_lock, flags);
4012 if (osb->dc_work_sequence != osb->dc_wake_sequence)
4014 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4019 static int ocfs2_downconvert_thread(void *arg)
4022 struct ocfs2_super *osb = arg;
4024 /* only quit once we've been asked to stop and there is no more
4026 while (!(kthread_should_stop() &&
4027 ocfs2_downconvert_thread_lists_empty(osb))) {
4029 wait_event_interruptible(osb->dc_event,
4030 ocfs2_downconvert_thread_should_wake(osb) ||
4031 kthread_should_stop());
4033 mlog(0, "downconvert_thread: awoken\n");
4035 ocfs2_downconvert_thread_do_work(osb);
4038 osb->dc_task = NULL;
4042 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
4044 unsigned long flags;
4046 spin_lock_irqsave(&osb->dc_task_lock, flags);
4047 /* make sure the downconvert thread gets a swipe at whatever changes
4048 * the caller may have made to the blocked lock list */
4049 osb->dc_wake_sequence++;
4050 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4051 wake_up(&osb->dc_event);