/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
/* Central locking logic has four stages:

   dlm_lock()
   dlm_unlock()

   request_lock(ls, lkb)
   convert_lock(ls, lkb)
   unlock_lock(ls, lkb)
   cancel_lock(ls, lkb)

   _request_lock(r, lkb)
   _convert_lock(r, lkb)
   _unlock_lock(r, lkb)
   _cancel_lock(r, lkb)

   do_request(r, lkb)
   do_convert(r, lkb)
   do_unlock(r, lkb)
   do_cancel(r, lkb)

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

   dlm_lock          = request_lock
   dlm_lock+CONVERT  = convert_lock
   dlm_unlock        = unlock_lock
   dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
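/* Illustration only (not part of the original file): under the staging
   above, the stage 1 dispatch in dlm_lock() amounts to roughly the
   following, once the input args have been validated:

	if (flags & DLM_LKF_CONVERT)
		error = convert_lock(ls, lkb, &args);
	else
		error = request_lock(ls, lkb, name, namelen, &args);
*/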
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "lowcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"
static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
static void toss_rsb(struct kref *kref);
/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 */
static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */
const int dlm_lvb_operations[8][8] = {
        /* UN   NL  CR  CW  PR  PW  EX  PD*/
        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
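/* Worked example (illustration only): a PW -> NL down-conversion looks up
   dlm_lvb_operations[PW+1][NL+1] == 0, so the caller's LVB is written to
   the resource; an NL -> EX up-conversion looks up [NL+1][EX+1] == 1, so
   the resource's LVB is copied back to the caller's lksb. */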
#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
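/* Example (illustration only): from the matrix above,
   dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) == 1 (readers share), while
   dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX) == 0 (a granted reader
   blocks an exclusive request). */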
/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */
static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
	       (unsigned long long)lkb->lkb_recover_seq);
}
static void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
	       "rlc %d name %s\n",
	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
	       r->res_name);
}
void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}
/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}
static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}
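/* Example (illustration only): EX -> PR is a down-conversion (rqmode is
   strictly below grmode); PR -> CW and CW -> PR are the "middle"
   conversions, since PR and CW are mutually incompatible in the compat
   matrix and neither direction is a plain up- or down-conversion. */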
static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}
static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   timeout caused the cancel then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}

	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   deadlock caused the cancel then return -EDEADLK */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}
static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb)) {
		send_bast(r, lkb, rqmode);
	} else {
		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
	}
}
/*
 * Basic operations on rsb's and lkb's
 */

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	kref_put(&r->res_ref, toss_rsb);
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}
static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r1, *r2;
	int count = 0;

	spin_lock(&ls->ls_new_rsb_spin);
	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	r1 = dlm_allocate_rsb(ls);
	r2 = dlm_allocate_rsb(ls);

	spin_lock(&ls->ls_new_rsb_spin);
	if (r1) {
		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	if (r2) {
		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	count = ls->ls_new_rsb_count;
	spin_unlock(&ls->ls_new_rsb_spin);

	if (!count)
		return -ENOMEM;
	return 0;
}
/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */

static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
			  struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int count;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		count = ls->ls_new_rsb_count;
		spin_unlock(&ls->ls_new_rsb_spin);
		log_debug(ls, "find_rsb retry %d %d %s",
			  count, dlm_config.ci_new_rsb_count, name);
		return -EAGAIN;
	}

	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	/* Convert the empty list_head to a NULL rb_node for tree usage: */
	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
	ls->ls_new_rsb_count--;
	spin_unlock(&ls->ls_new_rsb_spin);

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	*r_ret = r;
	return 0;
}
static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
{
	char maxname[DLM_RESNAME_MAXLEN];

	memset(maxname, 0, DLM_RESNAME_MAXLEN);
	memcpy(maxname, name, nlen);
	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
}
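/* Note (added for clarity): zero-padding the search name to
   DLM_RESNAME_MAXLEN makes this a fixed-width comparison, so names of
   different lengths still get one consistent total order for the
   rb-tree (e.g. "foo" compares as "foo" followed by zero bytes). */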
int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
			struct dlm_rsb **r_ret)
{
	struct rb_node *node = tree->rb_node;
	struct dlm_rsb *r;
	int rc;

	while (node) {
		r = rb_entry(node, struct dlm_rsb, res_hashnode);
		rc = rsb_cmp(r, name, len);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto found;
	}
	*r_ret = NULL;
	return -EBADR;

 found:
	*r_ret = r;
	return 0;
}
static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
{
	struct rb_node **newn = &tree->rb_node;
	struct rb_node *parent = NULL;
	int rc;

	while (*newn) {
		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
					       res_hashnode);

		parent = *newn;
		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
		if (rc < 0)
			newn = &parent->rb_left;
		else if (rc > 0)
			newn = &parent->rb_right;
		else {
			log_print("rsb_insert match");
			dlm_dump_rsb(rsb);
			dlm_dump_rsb(cur);
			return -EEXIST;
		}
	}

	rb_link_node(&rsb->res_hashnode, parent, newn);
	rb_insert_color(&rsb->res_hashnode, tree);
	return 0;
}
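/* Typical usage sketch (illustration only, assuming the bucket lock is
   held as in the callers below):

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		error = rsb_insert(new_r, &ls->ls_rsbtbl[b].keep);
	spin_unlock(&ls->ls_rsbtbl[b].lock);
*/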
/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 *
 * rsb's on the keep list are being used locally and refcounted.
 * rsb's on the toss list are not being used locally, and are not refcounted.
 *
 * The toss list rsb's were either
 * - previously used locally but not any more (were on keep list, then
 *   moved to toss list when last refcount dropped)
 * - created and put on toss list as a directory record for a lookup
 *   (we are the dir node for the res, but are not using the res right now,
 *   but some other node is)
 *
 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 * So, if the given rsb is on the toss list, it is moved to the keep list
 * before being returned.
 *
 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 * more refcounts exist, so the rsb is moved from the keep list to the
 * toss list.
 *
 * rsb's on both keep and toss lists are used for doing name to master
 * lookups.  rsb's that are in use locally (and being refcounted) are on
 * the keep list, rsb's that are not in use locally (not refcounted) and
 * only exist for name/master lookups are on the toss list.
 *
 * rsb's on the toss list whose dir_nodeid is not local can have stale
 * name/master mappings.  So, remote requests on such rsb's can potentially
 * return with an error, which means the mapping is stale and needs to
 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 * first_lkid is to keep only a single outstanding request on an rsb
 * while that rsb has a potentially stale master.)
 */
static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
			uint32_t hash, uint32_t b,
			int dir_nodeid, int from_nodeid,
			unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int from_local = 0;
	int from_other = 0;
	int from_dir = 0;
	int create = 0;
	int error;

	if (flags & R_RECEIVE_REQUEST) {
		if (from_nodeid == dir_nodeid)
			from_dir = 1;
		else
			from_other = 1;
	} else if (flags & R_REQUEST) {
		from_local = 1;
	}

	/*
	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
	 * we're the new master.  Our local recovery may not have set
	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
	 * create the rsb; dlm_recover_process_copy() will handle EBADR
	 * by resending.
	 *
	 * If someone sends us a request, we are the dir node, and we do
	 * not find the rsb anywhere, then recreate it.  This happens if
	 * someone sends us a request after we have removed/freed an rsb
	 * from our toss list.  (They sent a request instead of lookup
	 * because they are using an rsb from their toss list.)
	 */

	if (from_local || from_dir ||
	    (from_other && (dir_nodeid == our_nodeid))) {
		create = 1;
	}

 retry:
	if (create) {
		error = pre_rsb_struct(ls);
		if (error < 0)
			goto out;
	}

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	error = 0;
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive (master_nodeid may be out of date unless
	 * we are the dir_nodeid or were the master)  No other thread
	 * is using this rsb because it's on the toss list, so we can
	 * look at or update res_master_nodeid without lock_rsb.
	 */

	if ((r->res_master_nodeid != our_nodeid) && from_other) {
		/* our rsb was not master, and another node (not the dir node)
		   has sent us a request */
		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
			  from_nodeid, r->res_master_nodeid, dir_nodeid,
			  r->res_name);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
		/* don't think this should ever happen */
		log_error(ls, "find_rsb toss from_dir %d master %d",
			  from_nodeid, r->res_master_nodeid);
		dlm_print_rsb(r);
		/* fix it and go on */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	if (from_local && (r->res_master_nodeid != our_nodeid)) {
		/* Because we have held no locks on this rsb,
		   res_master_nodeid could have become stale. */
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	if (error == -EBADR && !create)
		goto out_unlock;

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	kref_init(&r->res_ref);

	if (from_dir) {
		/* want to see how often this happens */
		log_debug(ls, "find_rsb new from_dir %d recreate %s",
			  from_nodeid, r->res_name);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		goto out_add;
	}

	if (from_other && (dir_nodeid != our_nodeid)) {
		/* should never happen */
		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
		dlm_free_rsb(r);
		r = NULL;
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (from_other) {
		log_debug(ls, "find_rsb new from_other %d dir %d %s",
			  from_nodeid, dir_nodeid, r->res_name);
	}

	if (dir_nodeid == our_nodeid) {
		/* When we are the dir nodeid, we can set the master
		   node immediately */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	} else {
		/* set_master will send_lookup to dir_nodeid */
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
	}

 out_add:
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}
/* During recovery, other nodes can send us new MSTCPY locks (from
   dlm_recover_locks) before we've made ourselves master (in
   dlm_recover_masters). */

static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
			  uint32_t hash, uint32_t b,
			  int dir_nodeid, int from_nodeid,
			  unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int recover = (flags & R_RECEIVE_RECOVER);
	int error;

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		goto out;

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive. No other thread is using this rsb because
	 * it's on the toss list, so we can look at or update
	 * res_master_nodeid without lock_rsb.
	 */

	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
		/* our rsb is not master, and another node has sent us a
		   request; this should never happen */
		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (!recover && (r->res_master_nodeid != our_nodeid) &&
	    (dir_nodeid == our_nodeid)) {
		/* our rsb is not master, and we are dir; may as well fix it;
		   this should never happen */
		log_error(ls, "find_rsb toss our %d master %d dir %d",
			  our_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
	kref_init(&r->res_ref);

	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}
static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
		    unsigned int flags, struct dlm_rsb **r_ret)
{
	uint32_t hash, b;
	int dir_nodeid;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);

	if (dlm_no_directory(ls))
		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
				      from_nodeid, flags, r_ret);
	else
		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
				    from_nodeid, flags, r_ret);
}
/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourselves the master */

static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
				  int from_nodeid)
{
	if (dlm_no_directory(ls)) {
		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid,
			  r->res_dir_nodeid);
		dlm_print_rsb(r);
		return -ENOTBLK;
	}

	if (from_nodeid != r->res_dir_nodeid) {
		/* our rsb is not master, and another node (not the dir node)
		   has sent us a request.  this is much more common when our
		   master_nodeid is zero, so limit debug to non-zero.  */

		if (r->res_master_nodeid) {
			log_debug(ls, "validate master from_other %d master %d "
				  "dir %d first %x %s", from_nodeid,
				  r->res_master_nodeid, r->res_dir_nodeid,
				  r->res_first_lkid, r->res_name);
		}
		return -ENOTBLK;
	} else {
		/* our rsb is not master, but the dir nodeid has sent us a
		   request; this could happen with master 0 / res_nodeid -1 */

		if (r->res_master_nodeid) {
			log_error(ls, "validate master from_dir %d master %d "
				  "first %x %s",
				  from_nodeid, r->res_master_nodeid,
				  r->res_first_lkid, r->res_name);
		}

		r->res_master_nodeid = dlm_our_nodeid();
		r->res_nodeid = 0;
		return 0;
	}
}
/*
 * We're the dir node for this res and another node wants to know the
 * master nodeid.  During normal operation (non recovery) this is only
 * called from receive_lookup(); master lookups when the local node is
 * the dir node are done by find_rsb().
 *
 * normal operation, we are the dir node for a resource
 * . _request_lock
 * . set_master
 * . send_lookup
 * . receive_lookup
 * . dlm_master_lookup flags 0
 *
 * recover directory, we are rebuilding dir for all resources
 * . dlm_recover_directory
 * . dlm_rcom_names
 *   remote node sends back the rsb names it is master of and we are dir of
 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 *   we either create new rsb setting remote node as master, or find existing
 *   rsb and set master to be the remote node.
 *
 * recover masters, we are finding the new master for resources
 * . dlm_recover_masters
 * . recover_master
 * . dlm_send_rcom_lookup
 * . receive_rcom_lookup
 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 */
int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
		      unsigned int flags, int *r_nodeid, int *result)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int from_master = (flags & DLM_LU_RECOVER_DIR);
	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
	int our_nodeid = dlm_our_nodeid();
	int dir_nodeid, error, toss_list = 0;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	if (from_nodeid == our_nodeid) {
		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
			  our_nodeid, flags);
		return -EINVAL;
	}

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
			  from_nodeid, dir_nodeid, our_nodeid, hash,
			  ls->ls_num_nodes);
		*r_nodeid = -1;
		return -EINVAL;
	}

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		return error;

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error) {
		/* because the rsb is active, we need to lock_rsb before
		   checking/changing res_master_nodeid */

		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		lock_rsb(r);
		goto found;
	}

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto not_found;

	/* because the rsb is inactive (on toss list), it's not refcounted
	   and lock_rsb is not used, but is protected by the rsbtbl lock */

	toss_list = 1;
 found:
	if (r->res_dir_nodeid != our_nodeid) {
		/* should not happen, but may as well fix it and carry on */
		log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
			  r->res_dir_nodeid, our_nodeid, r->res_name);
		r->res_dir_nodeid = our_nodeid;
	}

	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
		/* Recovery uses this function to set a new master when
		   the previous master failed.  Setting NEW_MASTER will
		   force dlm_recover_masters to call recover_master on this
		   rsb even though the res_nodeid is no longer removed. */

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);

		if (toss_list) {
			/* I don't think we should ever find it on toss list. */
			log_error(ls, "dlm_master_lookup fix_master on toss");
			dlm_dump_rsb(r);
		}
	}

	if (from_master && (r->res_master_nodeid != from_nodeid)) {
		/* this will happen if from_nodeid became master during
		   a previous recovery cycle, and we aborted the previous
		   cycle before recovering this master value */

		log_limit(ls, "dlm_master_lookup from_master %d "
			  "master_nodeid %d res_nodeid %d first %x %s",
			  from_nodeid, r->res_master_nodeid, r->res_nodeid,
			  r->res_first_lkid, r->res_name);

		if (r->res_master_nodeid == our_nodeid) {
			log_error(ls, "from_master %d our_master", from_nodeid);
			dlm_dump_rsb(r);
			dlm_send_rcom_lookup_dump(r, from_nodeid);
			goto out_found;
		}

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);
	}

	if (!r->res_master_nodeid) {
		/* this will happen if recovery happens while we're looking
		   up the master for this rsb */

		log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
			  from_nodeid, r->res_first_lkid, r->res_name);
		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
	}

	if (!from_master && !fix_master &&
	    (r->res_master_nodeid == from_nodeid)) {
		/* this can happen when the master sends remove, the dir node
		   finds the rsb on the keep list and ignores the remove,
		   and the former master sends a lookup */

		log_limit(ls, "dlm_master_lookup from master %d flags %x "
			  "first %x %s", from_nodeid, flags,
			  r->res_first_lkid, r->res_name);
	}

 out_found:
	*r_nodeid = r->res_master_nodeid;
	if (result)
		*result = DLM_LU_MATCH;

	if (toss_list) {
		r->res_toss_time = jiffies;
		/* the rsb was inactive (on toss list) */
		spin_unlock(&ls->ls_rsbtbl[b].lock);
	} else {
		/* the rsb was active */
		unlock_rsb(r);
		put_rsb(r);
	}
	return 0;

 not_found:
	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = our_nodeid;
	r->res_master_nodeid = from_nodeid;
	r->res_nodeid = from_nodeid;
	kref_init(&r->res_ref);
	r->res_toss_time = jiffies;

	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
	if (error) {
		/* should never happen */
		dlm_free_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}

	if (result)
		*result = DLM_LU_ADD;
	*r_nodeid = from_nodeid;
	error = 0;
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}
static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			if (r->res_hash == hash)
				dlm_dump_rsb(r);
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
}

void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int error;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error)
		goto out_dump;

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto out;
 out_dump:
	dlm_dump_rsb(r);
 out:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
}
static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	if (r->res_lvbptr) {
		dlm_free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}
/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}
/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}
static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	int rv, id;

	lkb = dlm_allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
	INIT_LIST_HEAD(&lkb->lkb_time_list);
	INIT_LIST_HEAD(&lkb->lkb_cb_list);
	mutex_init(&lkb->lkb_cb_mutex);
	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

 retry:
	rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS);
	if (!rv)
		return -ENOMEM;

	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id);
	if (!rv)
		lkb->lkb_id = id;
	spin_unlock(&ls->ls_lkbidr_spin);

	if (rv == -EAGAIN)
		goto retry;

	if (rv < 0) {
		log_error(ls, "create_lkb idr error %d", rv);
		return rv;
	}

	*lkb_ret = lkb;
	return 0;
}
static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	spin_lock(&ls->ls_lkbidr_spin);
	lkb = idr_find(&ls->ls_lkbidr, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	spin_unlock(&ls->ls_lkbidr_spin);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}
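/* Note (added for clarity): find_lkb() returns with a reference held on
   the lkb; callers are expected to drop it with dlm_put_lkb() when they
   are done with the lkb. */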
static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}
/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint32_t lkid = lkb->lkb_id;

	spin_lock(&ls->ls_lkbidr_spin);
	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
		idr_remove(&ls->ls_lkbidr, lkid);
		spin_unlock(&ls->ls_lkbidr_spin);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			dlm_free_lvb(lkb->lkb_lvbptr);
		dlm_free_lkb(lkb);
		return 1;
	} else {
		spin_unlock(&ls->ls_lkbidr_spin);
		return 0;
	}
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}
/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	int rv;
	rv = kref_put(&lkb->lkb_ref, kill_lkb);
	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL;

	list_for_each_entry(lkb, head, lkb_statequeue)
		if (lkb->lkb_rqmode < mode)
			break;

	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
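/* Example (illustration only): inserting a lock with mode PR into a queue
   holding EX, CW, NL places it before NL, the first entry with a lower
   mode, giving EX, CW, PR, NL -- descending mode order, FIFO among
   entries of equal mode. */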
/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_timestamp = ktime_get();

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}
static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}
static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}
static int nodeid_warned(int nodeid, int num_nodes, int *warned)
{
	int i;

	for (i = 0; i < num_nodes; i++) {
		if (!warned[i]) {
			warned[i] = nodeid;
			return 0;
		}
		if (warned[i] == nodeid)
			return 1;
	}
	return 0;
}
void dlm_scan_waiters(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	ktime_t zero = ktime_set(0, 0);
	s64 us;
	s64 debug_maxus = 0;
	u32 debug_scanned = 0;
	u32 debug_expired = 0;
	int num_nodes = 0;
	int *warned = NULL;

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_equal(lkb->lkb_wait_time, zero))
			continue;

		debug_scanned++;

		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));

		if (us < dlm_config.ci_waitwarn_us)
			continue;

		lkb->lkb_wait_time = zero;

		debug_expired++;
		if (us > debug_maxus)
			debug_maxus = us;

		if (!num_nodes) {
			num_nodes = ls->ls_num_nodes;
			warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL);
		}
		if (!warned)
			continue;
		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
			continue;

		log_error(ls, "waitwarn %x %lld %d us check connection to "
			  "node %d", lkb->lkb_id, (long long)us,
			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
	}
	mutex_unlock(&ls->ls_waiters_mutex);
	kfree(warned);

	if (debug_expired)
		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
			  debug_scanned, debug_expired,
			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
}
/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	lkb->lkb_wait_time = ktime_get();
	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "addwait error %x %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}
/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
				struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* Cancel state was preemptively cleared by a successful convert,
	   see next comment, nothing to do. */

	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
		log_debug(ls, "remwait %x cancel_reply wait_type %d",
			  lkb->lkb_id, lkb->lkb_wait_type);
		return -1;
	}

	/* Remove for the convert reply, and preemptively remove for the
	   cancel reply.  A convert has been granted while there's still
	   an outstanding cancel on it (the cancel is moot and the result
	   in the cancel reply should be 0).  We preempt the cancel reply
	   because the app gets the convert result and then can follow up
	   with another op, like convert.  This subsequent op would see the
	   lingering state of the cancel and fail with -EBUSY. */

	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
			  lkb->lkb_id);
		lkb->lkb_wait_type = 0;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_count--;
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
		  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
		  mstype, lkb->lkb_flags);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't recvd a reply
	   to the op that was in progress prior to the unlock/cancel; we
	   give up on any reply to the earlier op.  FIXME: not sure when/how
	   this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}
static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, mstype, NULL);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* Handles situations where we might be processing a "fake" or "stub" reply in
   which we can't try to take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, ms->m_type, ms);
	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}
/* FIXME: make this more efficient */

static int shrink_bucket(struct dlm_ls *ls, int b)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int our_nodeid = dlm_our_nodeid();
	int count = 0, found;

	for (;;) {
		found = 0;
		spin_lock(&ls->ls_rsbtbl[b].lock);
		for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);

			/* If we're the directory record for this rsb, and
			   we're not the master of it, then we need to wait
			   for the master node to send us a dir remove
			   before removing the dir record. */

			if (!dlm_no_directory(ls) && !is_master(r) &&
			    (dlm_dir_nodeid(r) == our_nodeid)) {
				continue;
			}

			if (!time_after_eq(jiffies, r->res_toss_time +
					   dlm_config.ci_toss_secs * HZ))
				continue;
			found = 1;
			break;
		}

		if (!found) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			break;
		}

		if (kref_put(&r->res_ref, kill_rsb)) {
			rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
			spin_unlock(&ls->ls_rsbtbl[b].lock);

			/* We're the master of this rsb but we're not
			   the directory record, so we need to tell the
			   dir node to remove the dir record. */

			if (!dlm_no_directory(ls) && is_master(r) &&
			    (dlm_dir_nodeid(r) != our_nodeid)) {
				send_remove(r);
			}

			dlm_free_rsb(r);
			count++;
		} else {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "tossed rsb in use %s", r->res_name);
		}
	}

	return count;
}
void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		if (dlm_locking_stopped(ls))
			return;
		cond_resched();
	}
}
static void add_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	if (is_master_copy(lkb))
		return;

	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
		goto add_it;
	}
	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
		goto add_it;
	return;

 add_it:
	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
	mutex_lock(&ls->ls_timeout_mutex);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
	mutex_unlock(&ls->ls_timeout_mutex);
}

static void del_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	mutex_lock(&ls->ls_timeout_mutex);
	if (!list_empty(&lkb->lkb_time_list)) {
		list_del_init(&lkb->lkb_time_list);
		unhold_lkb(lkb);
	}
	mutex_unlock(&ls->ls_timeout_mutex);
}
/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
   and then lock rsb because of lock ordering in add_timeout.  We may need
   to specify some special timeout-related bits in the lkb that are just to
   be accessed under the timeout_mutex. */

void dlm_scan_timeout(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int do_cancel, do_warn;
	s64 wait_us;

	for (;;) {
		if (dlm_locking_stopped(ls))
			break;

		do_cancel = 0;
		do_warn = 0;
		mutex_lock(&ls->ls_timeout_mutex);
		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {

			wait_us = ktime_to_us(ktime_sub(ktime_get(),
							lkb->lkb_timestamp));
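			/* Note (added for clarity): lkb_timeout_cs and
			   ci_timewarn_cs are in centiseconds;
			   1 cs = 10000 us, hence the "* 10000" scaling
			   when comparing against wait_us. */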
			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
			    wait_us >= (lkb->lkb_timeout_cs * 10000))
				do_cancel = 1;

			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
				do_warn = 1;

			if (!do_cancel && !do_warn)
				continue;
			hold_lkb(lkb);
			break;
		}
		mutex_unlock(&ls->ls_timeout_mutex);

		if (!do_cancel && !do_warn)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		if (do_warn) {
			/* clear flag so we only warn once */
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
				del_timeout(lkb);
			dlm_timeout_warn(lkb);
		}

		if (do_cancel) {
			log_debug(ls, "timeout cancel %x node %d %s",
				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
			del_timeout(lkb);
			_cancel_lock(r, lkb);
		}

		unlock_rsb(r);
		unhold_rsb(r);
		dlm_put_lkb(lkb);
	}
}
/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
   dlm_recoverd before checking/setting ls_recover_begin. */

void dlm_adjust_timeouts(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);

	ls->ls_recover_begin = 0;
	mutex_lock(&ls->ls_timeout_mutex);
	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
	mutex_unlock(&ls->ls_timeout_mutex);

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_to_us(lkb->lkb_wait_time))
			lkb->lkb_wait_time = ktime_get();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
}
/* lkb is master or local copy */

static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int b, len = r->res_ls->ls_lvblen;

	/* b=1 lvb returned to caller
	   b=0 lvb written to rsb or invalidated
	   b=-1 do nothing */

	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

	if (b == 1) {
		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			return;

		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
		lkb->lkb_lvbseq = r->res_lvbseq;

	} else if (b == 0) {
		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

		if (!r->res_lvbptr)
			return;

		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
		r->res_lvbseq++;
		lkb->lkb_lvbseq = r->res_lvbseq;
		rsb_clear_flag(r, RSB_VALNOTVALID);
	}

	if (rsb_flag(r, RSB_VALNOTVALID))
		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}
static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode < DLM_LOCK_PW)
		return;

	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	if (!r->res_lvbptr)
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

	if (!r->res_lvbptr)
		return;

	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
	r->res_lvbseq++;
	rsb_clear_flag(r, RSB_VALNOTVALID);
}
/* lkb is process copy (pc) */

static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			    struct dlm_message *ms)
{
	int b;

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
	if (b == 1) {
		int len = receive_extralen(ms);
		if (len > DLM_RESNAME_MAXLEN)
			len = DLM_RESNAME_MAXLEN;
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
		lkb->lkb_lvbseq = ms->m_lvbseq;
	}
}
/* Manipulate lkb's on rsb's convert/granted/waiting queues
   remove_lock -- used for unlock, removes lkb from granted
   revert_lock -- used for cancel, moves lkb from convert to granted
   grant_lock  -- used for request and convert, adds lkb to granted or
                  moves lkb from convert or waiting to granted

   Each of these is used for master or local copy lkb's.  There is
   also a _pc() variation used to make the corresponding change on
   a process copy (pc) lkb. */

static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	del_lkb(r, lkb);
	lkb->lkb_grmode = DLM_LOCK_IV;
	/* this unhold undoes the original ref from create_lkb()
	   so this leads to the lkb being freed */
	unhold_lkb(lkb);
}

static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_unlock(r, lkb);
	_remove_lock(r, lkb);
}

static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	_remove_lock(r, lkb);
}
/* returns: 0 did nothing
	    1 moved lock to granted
	   -1 removed lock */

static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int rv = 0;

	lkb->lkb_rqmode = DLM_LOCK_IV;

	switch (lkb->lkb_status) {
	case DLM_LKSTS_GRANTED:
		break;
	case DLM_LKSTS_CONVERT:
		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		rv = 1;
		break;
	case DLM_LKSTS_WAITING:
		del_lkb(r, lkb);
		lkb->lkb_grmode = DLM_LOCK_IV;
		/* this unhold undoes the original ref from create_lkb()
		   so this leads to the lkb being freed */
		unhold_lkb(lkb);
		rv = -1;
		break;
	default:
		log_print("invalid status for revert %d", lkb->lkb_status);
	}
	return rv;
}

static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return revert_lock(r, lkb);
}
static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
		lkb->lkb_grmode = lkb->lkb_rqmode;
		if (lkb->lkb_status)
			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		else
			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
	}

	lkb->lkb_rqmode = DLM_LOCK_IV;
	lkb->lkb_highbast = 0;
}

static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_lock(r, lkb);
	_grant_lock(r, lkb);
}

static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  struct dlm_message *ms)
{
	set_lvb_lock_pc(r, lkb, ms);
	_grant_lock(r, lkb);
}

/* called by grant_pending_locks() which means an async grant message must
   be sent to the requesting node in addition to granting the lock if the
   lkb belongs to a remote node. */

static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	grant_lock(r, lkb);
	if (is_master_copy(lkb))
		send_grant(r, lkb);
	else
		queue_cast(r, lkb, 0);
}
/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
   change the granted/requested modes.  We're munging things accordingly in
   the process copy.
   CONVDEADLK: our grmode may have been forced down to NL to resolve a
   conversion deadlock
   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
   compatible with other granted locks */

static void munge_demoted(struct dlm_lkb *lkb)
{
	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
		log_print("munge_demoted %x invalid modes gr %d rq %d",
			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
		return;
	}

	lkb->lkb_grmode = DLM_LOCK_NL;
}

static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
	    ms->m_type != DLM_MSG_GRANT) {
		log_print("munge_altmode %x invalid reply type %d",
			  lkb->lkb_id, ms->m_type);
		return;
	}

	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
		lkb->lkb_rqmode = DLM_LOCK_PR;
	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
		lkb->lkb_rqmode = DLM_LOCK_CW;
	else {
		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
		dlm_print_lkb(lkb);
	}
}
static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
{
	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
					   lkb_statequeue);
	if (lkb->lkb_id == first->lkb_id)
		return 1;
	return 0;
}
/* Check if the given lkb conflicts with another lkb on the queue. */

static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this;

	list_for_each_entry(this, head, lkb_statequeue) {
		if (this == lkb)
			continue;
		if (!modes_compat(this, lkb))
			return 1;
	}
	return 0;
}
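/* Example (illustration only): with an EX lock on the grant queue, a new
   PR request hits __dlm_compat_matrix[EX+1][PR+1] == 0, so
   queue_conflict() returns 1 and the request cannot be granted now. */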
2104 * "A conversion deadlock arises with a pair of lock requests in the converting
2105 * queue for one resource. The granted mode of each lock blocks the requested
2106 * mode of the other lock."
2108 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2109 * convert queue from being granted, then deadlk/demote lkb.
2112 * Granted Queue: empty
2113 * Convert Queue: NL->EX (first lock)
2114 * PR->EX (second lock)
2116 * The first lock can't be granted because of the granted mode of the second
2117 * lock and the second lock can't be granted because it's not first in the
2118 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2119 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2120 * flag set and return DEMOTED in the lksb flags.
2122 * Originally, this function detected conv-deadlk in a more limited scope:
2123 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2124 * - if lkb1 was the first entry in the queue (not just earlier), and was
2125 * blocked by the granted mode of lkb2, and there was nothing on the
2126 * granted queue preventing lkb1 from being granted immediately, i.e.
2127 * lkb2 was the only thing preventing lkb1 from being granted.
2129 * That second condition meant we'd only say there was conv-deadlk if
2130 * resolving it (by demotion) would lead to the first lock on the convert
2131 * queue being granted right away. It allowed conversion deadlocks to exist
2132 * between locks on the convert queue while they couldn't be granted anyway.
2134 * Now, we detect and take action on conversion deadlocks immediately when
2135 * they're created, even if they may not be immediately consequential. If
2136 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2137 * mode that would prevent lkb1's conversion from being granted, we do a
2138 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2139 * I think this means that the lkb_is_ahead condition below should always
2140 * be zero, i.e. there will never be conv-deadlk between two locks that are
2141 * both already on the convert queue.
2144 static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2146 struct dlm_lkb *lkb1;
2147 int lkb_is_ahead = 0;
2149 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2155 if (!lkb_is_ahead) {
2156 if (!modes_compat(lkb2, lkb1))
2159 if (!modes_compat(lkb2, lkb1) &&
2160 !modes_compat(lkb1, lkb2))
2168 * Return 1 if the lock can be granted, 0 otherwise.
2169 * Also detect and resolve conversion deadlocks.
2171 * lkb is the lock to be granted
2173 * now is 1 if the function is being called in the context of the
2174 * immediate request, it is 0 if called later, after the lock has been
2177 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2180 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
2182 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2185 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2186 * a new request for a NL mode lock being blocked.
2188 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2189 * request, then it would be granted. In essence, the use of this flag
2190 * tells the Lock Manager to expedite theis request by not considering
2191 * what may be in the CONVERTING or WAITING queues... As of this
2192 * writing, the EXPEDITE flag can be used only with new requests for NL
2193 * mode locks. This flag is not valid for conversion requests.
2195 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
2196 * conversion or used with a non-NL requested mode. We also know an
2197 * EXPEDITE request is always granted immediately, so now must always
2198 * be 1. The full condition to grant an expedite request: (now &&
2199 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2200 * therefore be shortened to just checking the flag.
2203 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2207 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2208 * added to the remaining conditions.
2211 if (queue_conflict(&r->res_grantqueue, lkb))
2215 * 6-3: By default, a conversion request is immediately granted if the
2216 * requested mode is compatible with the modes of all other granted
2220 if (queue_conflict(&r->res_convertqueue, lkb))
2224 * 6-5: But the default algorithm for deciding whether to grant or
2225 * queue conversion requests does not by itself guarantee that such
2226 * requests are serviced on a "first come first serve" basis. This, in
2227 * turn, can lead to a phenomenon known as "indefinate postponement".
2229 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2230 * the system service employed to request a lock conversion. This flag
2231 * forces certain conversion requests to be queued, even if they are
2232 * compatible with the granted modes of other locks on the same
2233 * resource. Thus, the use of this flag results in conversion requests
2234 * being ordered on a "first come first servce" basis.
2236 * DCT: This condition is all about new conversions being able to occur
2237 * "in place" while the lock remains on the granted queue (assuming
2238 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
2239 * doesn't _have_ to go onto the convert queue where it's processed in
2240 * order. The "now" variable is necessary to distinguish converts
2241 * being received and processed for the first time now, because once a
2242 * convert is moved to the conversion queue the condition below applies
2243 * requiring fifo granting.
2246 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2250 * Even if the convert is compat with all granted locks,
2251 * QUECVT forces it behind other locks on the convert queue.
2254 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2255 if (list_empty(&r->res_convertqueue))
2262 * The NOORDER flag is set to avoid the standard vms rules on grant
2266 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2270 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2271 * granted until all other conversion requests ahead of it are granted
2275 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2279 * 6-4: By default, a new request is immediately granted only if all
2280 * three of the following conditions are satisfied when the request is issued:
2282 * - The queue of ungranted conversion requests for the resource is empty.
2284 * - The queue of ungranted new requests for the resource is empty.
2285 * - The mode of the new request is compatible with the most
2286 * restrictive mode of all granted locks on the resource.
2289 if (now && !conv && list_empty(&r->res_convertqueue) &&
2290 list_empty(&r->res_waitqueue))
2294 * 6-4: Once a lock request is in the queue of ungranted new requests,
2295 * it cannot be granted until the queue of ungranted conversion
2296 * requests is empty, all ungranted new requests ahead of it are
2297 * granted and/or canceled, and it is compatible with the granted mode
2298 * of the most restrictive lock granted on the resource.
2301 if (!now && !conv && list_empty(&r->res_convertqueue) &&
2302 first_in_list(lkb, &r->res_waitqueue))
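
/*
 * Illustrative sketch only, not called anywhere in this file: how the
 * __dlm_compat_matrix is consulted for a (grmode, rqmode) pair.  The +1
 * maps DLM_LOCK_IV (-1) onto the UN row/column; modes_compat() used
 * further below does the equivalent lookup for two lkbs.
 */
static inline int example_modes_compat(int grmode, int rqmode)
{
	return __dlm_compat_matrix[grmode + 1][rqmode + 1];
}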
2308 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2312 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2313 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2318 rv = _can_be_granted(r, lkb, now);
2323 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2324 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2325 * cancels one of the locks.
2328 if (is_convert && can_be_queued(lkb) &&
2329 conversion_deadlock_detect(r, lkb)) {
2330 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2331 lkb->lkb_grmode = DLM_LOCK_NL;
2332 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2333 } else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
2337 log_print("can_be_granted deadlock %x now %d", lkb->lkb_id, now);
2346 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2347 * to grant a request in a mode other than the normal rqmode. It's a
2348 * simple way to provide a big optimization to applications that can
2352 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2354 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2358 lkb->lkb_rqmode = alt;
2359 rv = _can_be_granted(r, lkb, now);
2361 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2363 lkb->lkb_rqmode = rqmode;
2369 /* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
2370 for locks pending on the convert list. Once verified (watch for these
2371 log_prints), we should be able to just call _can_be_granted() and not
2372 bother with the demote/deadlk cases here (and there's no easy way to deal
2373 with a deadlk here, we'd have to generate something like grant_lock with
2374 the deadlk error.) */
2376 /* Returns the highest requested mode of all blocked conversions; sets
2377 cw if there's a blocked conversion to DLM_LOCK_CW. */
2379 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2380 unsigned int *count)
2382 struct dlm_lkb *lkb, *s;
2383 int hi, demoted, quit, grant_restart, demote_restart;
2392 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2393 demoted = is_demoted(lkb);
2396 if (can_be_granted(r, lkb, 0, &deadlk)) {
2397 grant_lock_pending(r, lkb);
2404 if (!demoted && is_demoted(lkb)) {
2405 log_print("WARN: pending demoted %x node %d %s",
2406 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2412 log_print("WARN: pending deadlock %x node %d %s",
2413 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2418 hi = max_t(int, lkb->lkb_rqmode, hi);
2420 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2426 if (demote_restart && !quit) {
2431 return max_t(int, high, hi);
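
/*
 * Sketch of the control flow above (an assumed simplification, unused
 * by the real code): a demotion during one pass over the convert queue
 * can make an earlier entry grantable, so the queue is walked again,
 * and the quit flag bounds this to a single rescan.
 */
static int one_convert_pass(struct dlm_rsb *r)
{
	/* hypothetical stand-in for the list_for_each_entry_safe() body
	   above: grant what can be granted, demote deadlocked converts,
	   return nonzero if anything was demoted */
	return 0;
}

static void example_convert_rescan(struct dlm_rsb *r)
{
	int quit = 0;
 restart:
	if (one_convert_pass(r) && !quit) {
		quit = 1;
		goto restart;
	}
}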
2434 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2435 unsigned int *count)
2437 struct dlm_lkb *lkb, *s;
2439 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2440 if (can_be_granted(r, lkb, 0, NULL)) {
2441 grant_lock_pending(r, lkb);
2445 high = max_t(int, lkb->lkb_rqmode, high);
2446 if (lkb->lkb_rqmode == DLM_LOCK_CW)
2454 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2455 on either the convert or waiting queue.
2456 high is the largest rqmode of all locks blocked on the convert or
     waiting queue. */
2459 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2461 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2462 if (gr->lkb_highbast < DLM_LOCK_EX)
2467 if (gr->lkb_highbast < high &&
2468 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2473 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2475 struct dlm_lkb *lkb, *s;
2476 int high = DLM_LOCK_IV;
2479 if (!is_master(r)) {
2480 log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2485 high = grant_pending_convert(r, high, &cw, count);
2486 high = grant_pending_wait(r, high, &cw, count);
2488 if (high == DLM_LOCK_IV)
2492 * If there are locks left on the wait/convert queue then send blocking
2493 * ASTs to granted locks based on the largest requested mode (high)
2497 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2498 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2499 if (cw && high == DLM_LOCK_PR &&
2500 lkb->lkb_grmode == DLM_LOCK_PR)
2501 queue_bast(r, lkb, DLM_LOCK_CW);
2503 queue_bast(r, lkb, high);
2504 lkb->lkb_highbast = high;
2509 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2511 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2512 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2513 if (gr->lkb_highbast < DLM_LOCK_EX)
2518 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
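
/*
 * Illustrative helper (assumed, mirroring the test above): PR and CW
 * conflict with each other yet each coexists with itself and with the
 * modes up to CR, so neither dominates the other; that is why this
 * pair is special-cased here and in grant_pending_locks().
 */
static inline int example_pr_cw_clash(int grmode, int rqmode)
{
	return (grmode == DLM_LOCK_PR && rqmode == DLM_LOCK_CW) ||
	       (grmode == DLM_LOCK_CW && rqmode == DLM_LOCK_PR);
}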
2523 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2524 struct dlm_lkb *lkb)
2528 list_for_each_entry(gr, head, lkb_statequeue) {
2529 /* skip self when sending basts to convertqueue */
2532 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2533 queue_bast(r, gr, lkb->lkb_rqmode);
2534 gr->lkb_highbast = lkb->lkb_rqmode;
2539 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2541 send_bast_queue(r, &r->res_grantqueue, lkb);
2544 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2546 send_bast_queue(r, &r->res_grantqueue, lkb);
2547 send_bast_queue(r, &r->res_convertqueue, lkb);
2550 /* set_master(r, lkb) -- set the master nodeid of a resource
2552 The purpose of this function is to set the nodeid field in the given
2553 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
2554 known, it can just be copied to the lkb and the function will return
2555 0. If the rsb's nodeid is _not_ known, it needs to be looked up
2556 before it can be copied to the lkb.
2558 When the rsb nodeid is being looked up remotely, the initial lkb
2559 causing the lookup is kept on the ls_waiters list waiting for the
2560 lookup reply. Other lkb's waiting for the same rsb lookup are kept
2561 on the rsb's res_lookup list until the master is verified.
2564 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2565 1: the rsb master is not available and the lkb has been placed on
     a wait queue
2569 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2571 int our_nodeid = dlm_our_nodeid();
2573 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2574 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2575 r->res_first_lkid = lkb->lkb_id;
2576 lkb->lkb_nodeid = r->res_nodeid;
2580 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2581 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2585 if (r->res_master_nodeid == our_nodeid) {
2586 lkb->lkb_nodeid = 0;
2590 if (r->res_master_nodeid) {
2591 lkb->lkb_nodeid = r->res_master_nodeid;
2595 if (dlm_dir_nodeid(r) == our_nodeid) {
2596 /* This is a somewhat unusual case; find_rsb will usually
2597 have set res_master_nodeid when dir nodeid is local, but
2598 there are cases where we become the dir node after we've
2599 passed find_rsb and gone through _request_lock again.
2600 confirm_master() or process_lookup_list() needs to be
2601 called after this. */
2602 log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2603 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid, r->res_name);
2605 r->res_master_nodeid = our_nodeid;
2607 lkb->lkb_nodeid = 0;
2611 r->res_first_lkid = lkb->lkb_id;
2612 send_lookup(r, lkb);
2616 static void process_lookup_list(struct dlm_rsb *r)
2618 struct dlm_lkb *lkb, *safe;
2620 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2621 list_del_init(&lkb->lkb_rsb_lookup);
2622 _request_lock(r, lkb);
2627 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2629 static void confirm_master(struct dlm_rsb *r, int error)
2631 struct dlm_lkb *lkb;
2633 if (!r->res_first_lkid)
2639 r->res_first_lkid = 0;
2640 process_lookup_list(r);
2646 /* the remote request failed and won't be retried (it was
2647 a NOQUEUE, or has been canceled/unlocked); make a waiting
2648 lkb the first_lkid */
2650 r->res_first_lkid = 0;
2652 if (!list_empty(&r->res_lookup)) {
2653 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2655 list_del_init(&lkb->lkb_rsb_lookup);
2656 r->res_first_lkid = lkb->lkb_id;
2657 _request_lock(r, lkb);
2662 log_error(r->res_ls, "confirm_master unknown error %d", error);
2666 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2667 int namelen, unsigned long timeout_cs,
2668 void (*ast) (void *astparam),
2670 void (*bast) (void *astparam, int mode),
2671 struct dlm_args *args)
2675 /* check for invalid arg usage */
2677 if (mode < 0 || mode > DLM_LOCK_EX)
2680 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2683 if (flags & DLM_LKF_CANCEL)
2686 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2689 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2692 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2695 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2698 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2701 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2704 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2710 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2713 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2716 /* these args will be copied to the lkb in validate_lock_args;
2717 this cannot be done now because, when converting locks, fields in
2718 an active lkb cannot be modified before locking the rsb */
2720 args->flags = flags;
2722 args->astparam = astparam;
2723 args->bastfn = bast;
2724 args->timeout = timeout_cs;
2732 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2734 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2735 DLM_LKF_FORCEUNLOCK))
2738 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2741 args->flags = flags;
2742 args->astparam = astarg;
2746 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2747 struct dlm_args *args)
2751 if (args->flags & DLM_LKF_CONVERT) {
2752 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2755 if (args->flags & DLM_LKF_QUECVT &&
2756 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2760 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2763 if (lkb->lkb_wait_type)
2766 if (is_overlap(lkb))
2770 lkb->lkb_exflags = args->flags;
2771 lkb->lkb_sbflags = 0;
2772 lkb->lkb_astfn = args->astfn;
2773 lkb->lkb_astparam = args->astparam;
2774 lkb->lkb_bastfn = args->bastfn;
2775 lkb->lkb_rqmode = args->mode;
2776 lkb->lkb_lksb = args->lksb;
2777 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2778 lkb->lkb_ownpid = (int) current->pid;
2779 lkb->lkb_timeout_cs = args->timeout;
2783 log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2784 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2785 lkb->lkb_status, lkb->lkb_wait_type,
2786 lkb->lkb_resource->res_name);
2790 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
     for success */
2793 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2794 because there may be a lookup in progress and it's valid to do
2795 cancel/unlockf on it */
2797 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2799 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2802 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2803 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2808 /* an lkb may still exist even though the lock is EOL'ed due to a
2809 cancel, unlock or failed noqueue request; an app can't use these
2810 locks; return same error as if the lkid had not been found at all */
2812 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2813 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2818 /* an lkb may be waiting for an rsb lookup to complete where the
2819 lookup was initiated by another lock */
2821 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2822 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2823 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2824 list_del_init(&lkb->lkb_rsb_lookup);
2825 queue_cast(lkb->lkb_resource, lkb,
2826 args->flags & DLM_LKF_CANCEL ?
2827 -DLM_ECANCEL : -DLM_EUNLOCK);
2828 unhold_lkb(lkb); /* undoes create_lkb() */
2830 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2835 /* cancel not allowed with another cancel/unlock in progress */
2837 if (args->flags & DLM_LKF_CANCEL) {
2838 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2841 if (is_overlap(lkb))
2844 /* don't let scand try to do a cancel */
2847 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2848 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2853 /* there's nothing to cancel */
2854 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2855 !lkb->lkb_wait_type) {
2860 switch (lkb->lkb_wait_type) {
2861 case DLM_MSG_LOOKUP:
2862 case DLM_MSG_REQUEST:
2863 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2866 case DLM_MSG_UNLOCK:
2867 case DLM_MSG_CANCEL:
2870 /* add_to_waiters() will set OVERLAP_CANCEL */
2874 /* do we need to allow a force-unlock if there's a normal unlock
2875 already in progress? in what conditions could the normal unlock
2876 fail such that we'd want to send a force-unlock to be sure? */
2878 if (args->flags & DLM_LKF_FORCEUNLOCK) {
2879 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
2882 if (is_overlap_unlock(lkb))
2885 /* don't let scand try to do a cancel */
2888 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2889 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2894 switch (lkb->lkb_wait_type) {
2895 case DLM_MSG_LOOKUP:
2896 case DLM_MSG_REQUEST:
2897 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2900 case DLM_MSG_UNLOCK:
2903 /* add_to_waiters() will set OVERLAP_UNLOCK */
2907 /* normal unlock not allowed if there's any op in progress */
2909 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
2913 /* an overlapping op shouldn't blow away exflags from other op */
2914 lkb->lkb_exflags |= args->flags;
2915 lkb->lkb_sbflags = 0;
2916 lkb->lkb_astparam = args->astparam;
2920 log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
2921 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
2922 args->flags, lkb->lkb_wait_type,
2923 lkb->lkb_resource->res_name);
2928 * Four stage 4 varieties:
2929 * do_request(), do_convert(), do_unlock(), do_cancel()
2930 * These are called on the master node for the given lock and
2931 * from the central locking logic.
2934 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2938 if (can_be_granted(r, lkb, 1, NULL)) {
2940 queue_cast(r, lkb, 0);
2944 if (can_be_queued(lkb)) {
2945 error = -EINPROGRESS;
2946 add_lkb(r, lkb, DLM_LKSTS_WAITING);
2952 queue_cast(r, lkb, -EAGAIN);
2957 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
2962 if (force_blocking_asts(lkb))
2963 send_blocking_asts_all(r, lkb);
2966 send_blocking_asts(r, lkb);
2971 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2976 /* changing an existing lock may allow others to be granted */
2978 if (can_be_granted(r, lkb, 1, &deadlk)) {
2980 queue_cast(r, lkb, 0);
2984 /* can_be_granted() detected that this lock would block in a conversion
2985 deadlock, so we leave it on the granted queue and return EDEADLK in
2986 the ast for the convert. */
2989 /* it's left on the granted queue */
2990 revert_lock(r, lkb);
2991 queue_cast(r, lkb, -EDEADLK);
2996 /* is_demoted() means the can_be_granted() above set the grmode
2997 to NL, and left us on the granted queue. This auto-demotion
2998 (due to CONVDEADLK) might mean other locks, and/or this lock, are
2999 now grantable. We have to try to grant other converting locks
3000 before we try again to grant this one. */
3002 if (is_demoted(lkb)) {
3003 grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3004 if (_can_be_granted(r, lkb, 1)) {
3006 queue_cast(r, lkb, 0);
3009 /* else fall through and move to convert queue */
3012 if (can_be_queued(lkb)) {
3013 error = -EINPROGRESS;
3015 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3021 queue_cast(r, lkb, -EAGAIN);
3026 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3031 grant_pending_locks(r, NULL);
3032 /* grant_pending_locks also sends basts */
3035 if (force_blocking_asts(lkb))
3036 send_blocking_asts_all(r, lkb);
3039 send_blocking_asts(r, lkb);
3044 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3046 remove_lock(r, lkb);
3047 queue_cast(r, lkb, -DLM_EUNLOCK);
3048 return -DLM_EUNLOCK;
3051 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3054 grant_pending_locks(r, NULL);
3057 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3059 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3063 error = revert_lock(r, lkb);
3065 queue_cast(r, lkb, -DLM_ECANCEL);
3066 return -DLM_ECANCEL;
3071 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3075 grant_pending_locks(r, NULL);
3079 * Four stage 3 varieties:
3080 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3083 /* add a new lkb to a possibly new rsb, called by requesting process */
3085 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3089 /* set_master: sets lkb nodeid from r */
3091 error = set_master(r, lkb);
3100 /* receive_request() calls do_request() on remote node */
3101 error = send_request(r, lkb);
3103 error = do_request(r, lkb);
3104 /* for remote locks the request_reply is sent
3105 between do_request and do_request_effects */
3106 do_request_effects(r, lkb, error);
3112 /* change some property of an existing lkb, e.g. mode */
3114 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3119 /* receive_convert() calls do_convert() on remote node */
3120 error = send_convert(r, lkb);
3122 error = do_convert(r, lkb);
3123 /* for remote locks the convert_reply is sent
3124 between do_convert and do_convert_effects */
3125 do_convert_effects(r, lkb, error);
3131 /* remove an existing lkb from the granted queue */
3133 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3138 /* receive_unlock() calls do_unlock() on remote node */
3139 error = send_unlock(r, lkb);
3141 error = do_unlock(r, lkb);
3142 /* for remote locks the unlock_reply is sent
3143 between do_unlock and do_unlock_effects */
3144 do_unlock_effects(r, lkb, error);
3150 /* remove an existing lkb from the convert or wait queue */
3152 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3157 /* receive_cancel() calls do_cancel() on remote node */
3158 error = send_cancel(r, lkb);
3160 error = do_cancel(r, lkb);
3161 /* for remote locks the cancel_reply is sent
3162 between do_cancel and do_cancel_effects */
3163 do_cancel_effects(r, lkb, error);
3170 * Four stage 2 varieties:
3171 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3174 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3175 int len, struct dlm_args *args)
3180 error = validate_lock_args(ls, lkb, args);
3184 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3191 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3193 error = _request_lock(r, lkb);
3200 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3201 struct dlm_args *args)
3206 r = lkb->lkb_resource;
3211 error = validate_lock_args(ls, lkb, args);
3215 error = _convert_lock(r, lkb);
3222 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3223 struct dlm_args *args)
3228 r = lkb->lkb_resource;
3233 error = validate_unlock_args(lkb, args);
3237 error = _unlock_lock(r, lkb);
3244 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3245 struct dlm_args *args)
3250 r = lkb->lkb_resource;
3255 error = validate_unlock_args(lkb, args);
3259 error = _cancel_lock(r, lkb);
3267 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
3270 int dlm_lock(dlm_lockspace_t *lockspace,
3272 struct dlm_lksb *lksb,
3275 unsigned int namelen,
3276 uint32_t parent_lkid,
3277 void (*ast) (void *astarg),
3279 void (*bast) (void *astarg, int mode))
3282 struct dlm_lkb *lkb;
3283 struct dlm_args args;
3284 int error, convert = flags & DLM_LKF_CONVERT;
3286 ls = dlm_find_lockspace_local(lockspace);
3290 dlm_lock_recovery(ls);
3293 error = find_lkb(ls, lksb->sb_lkid, &lkb);
3295 error = create_lkb(ls, &lkb);
3300 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3301 astarg, bast, &args);
3306 error = convert_lock(ls, lkb, &args);
3308 error = request_lock(ls, lkb, name, namelen, &args);
3310 if (error == -EINPROGRESS)
3313 if (convert || error)
3315 if (error == -EAGAIN || error == -EDEADLK)
3318 dlm_unlock_recovery(ls);
3319 dlm_put_lockspace(ls);
3323 int dlm_unlock(dlm_lockspace_t *lockspace,
3326 struct dlm_lksb *lksb,
3330 struct dlm_lkb *lkb;
3331 struct dlm_args args;
3334 ls = dlm_find_lockspace_local(lockspace);
3338 dlm_lock_recovery(ls);
3340 error = find_lkb(ls, lkid, &lkb);
3344 error = set_unlock_args(flags, astarg, &args);
3348 if (flags & DLM_LKF_CANCEL)
3349 error = cancel_lock(ls, lkb, &args);
3351 error = unlock_lock(ls, lkb, &args);
3353 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3355 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3360 dlm_unlock_recovery(ls);
3361 dlm_put_lockspace(ls);
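
/*
 * Illustrative sketch only, not part of this file's logic: one way a
 * kernel caller might drive the two stage-1 entry points above.  It
 * assumes a lockspace handle obtained elsewhere via dlm_new_lockspace();
 * the example_* names, the resource name, and the use of a completion
 * are this sketch's own.  dlm_lock()/dlm_unlock() returning 0 means the
 * operation is in progress; the final status arrives in sb_status via
 * the ast callback.
 */
static struct dlm_lksb example_lksb;
static DECLARE_COMPLETION(example_done);

static void example_ast(void *astarg)
{
	complete(&example_done);	/* example_lksb.sb_status now valid */
}

static int example_lock_unlock(dlm_lockspace_t *ls)
{
	static char name[] = "example-resource";
	int error;

	error = dlm_lock(ls, DLM_LOCK_EX, &example_lksb, 0,
			 name, strlen(name), 0, example_ast, NULL, NULL);
	if (error)
		return error;
	wait_for_completion(&example_done);
	if (example_lksb.sb_status)
		return example_lksb.sb_status;

	error = dlm_unlock(ls, example_lksb.sb_lkid, 0, &example_lksb, NULL);
	if (error)
		return error;
	wait_for_completion(&example_done);	/* ast fires with -DLM_EUNLOCK */
	return 0;
}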
3366 * send/receive routines for remote operations and replies
3370 * send_request receive_request
3371 * send_convert receive_convert
3372 * send_unlock receive_unlock
3373 * send_cancel receive_cancel
3374 * send_grant receive_grant
3375 * send_bast receive_bast
3376 * send_lookup receive_lookup
3377 * send_remove receive_remove
3380 * receive_request_reply send_request_reply
3381 * receive_convert_reply send_convert_reply
3382 * receive_unlock_reply send_unlock_reply
3383 * receive_cancel_reply send_cancel_reply
3384 * receive_lookup_reply send_lookup_reply
3387 static int _create_message(struct dlm_ls *ls, int mb_len,
3388 int to_nodeid, int mstype,
3389 struct dlm_message **ms_ret,
3390 struct dlm_mhandle **mh_ret)
3392 struct dlm_message *ms;
3393 struct dlm_mhandle *mh;
3396 /* get_buffer gives us a message handle (mh) that we need to
3397 pass into lowcomms_commit and a message buffer (mb) that we
3398 write our data into */
3400 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3404 memset(mb, 0, mb_len);
3406 ms = (struct dlm_message *) mb;
3408 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3409 ms->m_header.h_lockspace = ls->ls_global_id;
3410 ms->m_header.h_nodeid = dlm_our_nodeid();
3411 ms->m_header.h_length = mb_len;
3412 ms->m_header.h_cmd = DLM_MSG;
3414 ms->m_type = mstype;
3421 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3422 int to_nodeid, int mstype,
3423 struct dlm_message **ms_ret,
3424 struct dlm_mhandle **mh_ret)
3426 int mb_len = sizeof(struct dlm_message);
3429 case DLM_MSG_REQUEST:
3430 case DLM_MSG_LOOKUP:
3431 case DLM_MSG_REMOVE:
3432 mb_len += r->res_length;
3434 case DLM_MSG_CONVERT:
3435 case DLM_MSG_UNLOCK:
3436 case DLM_MSG_REQUEST_REPLY:
3437 case DLM_MSG_CONVERT_REPLY:
3439 if (lkb && lkb->lkb_lvbptr)
3440 mb_len += r->res_ls->ls_lvblen;
3444 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3448 /* further lowcomms enhancements or alternate implementations may make
3449 the return value from this function useful at some point */
3451 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3453 dlm_message_out(ms);
3454 dlm_lowcomms_commit_buffer(mh);
3458 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3459 struct dlm_message *ms)
3461 ms->m_nodeid = lkb->lkb_nodeid;
3462 ms->m_pid = lkb->lkb_ownpid;
3463 ms->m_lkid = lkb->lkb_id;
3464 ms->m_remid = lkb->lkb_remid;
3465 ms->m_exflags = lkb->lkb_exflags;
3466 ms->m_sbflags = lkb->lkb_sbflags;
3467 ms->m_flags = lkb->lkb_flags;
3468 ms->m_lvbseq = lkb->lkb_lvbseq;
3469 ms->m_status = lkb->lkb_status;
3470 ms->m_grmode = lkb->lkb_grmode;
3471 ms->m_rqmode = lkb->lkb_rqmode;
3472 ms->m_hash = r->res_hash;
3474 /* m_result and m_bastmode are set from function args,
3475 not from lkb fields */
3477 if (lkb->lkb_bastfn)
3478 ms->m_asts |= DLM_CB_BAST;
3480 ms->m_asts |= DLM_CB_CAST;
3482 /* compare with switch in create_message; send_remove() doesn't
     use send_args() */
3485 switch (ms->m_type) {
3486 case DLM_MSG_REQUEST:
3487 case DLM_MSG_LOOKUP:
3488 memcpy(ms->m_extra, r->res_name, r->res_length);
3490 case DLM_MSG_CONVERT:
3491 case DLM_MSG_UNLOCK:
3492 case DLM_MSG_REQUEST_REPLY:
3493 case DLM_MSG_CONVERT_REPLY:
3495 if (!lkb->lkb_lvbptr)
3497 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3502 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3504 struct dlm_message *ms;
3505 struct dlm_mhandle *mh;
3506 int to_nodeid, error;
3508 to_nodeid = r->res_nodeid;
3510 error = add_to_waiters(lkb, mstype, to_nodeid);
3514 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3518 send_args(r, lkb, ms);
3520 error = send_message(mh, ms);
3526 remove_from_waiters(lkb, msg_reply_type(mstype));
3530 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3532 return send_common(r, lkb, DLM_MSG_REQUEST);
3535 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3539 error = send_common(r, lkb, DLM_MSG_CONVERT);
3541 /* down conversions go without a reply from the master */
3542 if (!error && down_conversion(lkb)) {
3543 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3544 r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3545 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3546 r->res_ls->ls_stub_ms.m_result = 0;
3547 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
3553 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3554 MASTER_UNCERTAIN to force the next request on the rsb to confirm
3555 that the master is still correct. */
3557 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3559 return send_common(r, lkb, DLM_MSG_UNLOCK);
3562 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3564 return send_common(r, lkb, DLM_MSG_CANCEL);
3567 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3569 struct dlm_message *ms;
3570 struct dlm_mhandle *mh;
3571 int to_nodeid, error;
3573 to_nodeid = lkb->lkb_nodeid;
3575 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3579 send_args(r, lkb, ms);
3583 error = send_message(mh, ms);
3588 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3590 struct dlm_message *ms;
3591 struct dlm_mhandle *mh;
3592 int to_nodeid, error;
3594 to_nodeid = lkb->lkb_nodeid;
3596 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3600 send_args(r, lkb, ms);
3602 ms->m_bastmode = mode;
3604 error = send_message(mh, ms);
3609 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3611 struct dlm_message *ms;
3612 struct dlm_mhandle *mh;
3613 int to_nodeid, error;
3615 to_nodeid = dlm_dir_nodeid(r);
3617 error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3621 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3625 send_args(r, lkb, ms);
3627 error = send_message(mh, ms);
3633 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3637 static int send_remove(struct dlm_rsb *r)
3639 struct dlm_message *ms;
3640 struct dlm_mhandle *mh;
3641 int to_nodeid, error;
3643 to_nodeid = dlm_dir_nodeid(r);
3645 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3649 memcpy(ms->m_extra, r->res_name, r->res_length);
3650 ms->m_hash = r->res_hash;
3652 error = send_message(mh, ms);
3657 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3660 struct dlm_message *ms;
3661 struct dlm_mhandle *mh;
3662 int to_nodeid, error;
3664 to_nodeid = lkb->lkb_nodeid;
3666 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3670 send_args(r, lkb, ms);
3674 error = send_message(mh, ms);
3679 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3681 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3684 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3686 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3689 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3691 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3694 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3696 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3699 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3700 int ret_nodeid, int rv)
3702 struct dlm_rsb *r = &ls->ls_stub_rsb;
3703 struct dlm_message *ms;
3704 struct dlm_mhandle *mh;
3705 int error, nodeid = ms_in->m_header.h_nodeid;
3707 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3711 ms->m_lkid = ms_in->m_lkid;
3713 ms->m_nodeid = ret_nodeid;
3715 error = send_message(mh, ms);
3720 /* which args we save from a received message depends heavily on the type
3721 of message, unlike the send side where we can safely send everything about
3722 the lkb for any type of message */
3724 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3726 lkb->lkb_exflags = ms->m_exflags;
3727 lkb->lkb_sbflags = ms->m_sbflags;
3728 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3729 (ms->m_flags & 0x0000FFFF);
3732 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3734 if (ms->m_flags == DLM_IFL_STUB_MS) return;
3737 lkb->lkb_sbflags = ms->m_sbflags;
3738 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3739 (ms->m_flags & 0x0000FFFF);
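
/*
 * Illustrative helper (assumed), equivalent to the masking above: the
 * upper 16 bits of lkb_flags are node-local state and must survive a
 * received message; only the lower 16 bits are wire flags.
 */
static inline uint32_t example_merge_wire_flags(uint32_t local, uint32_t wire)
{
	return (local & 0xFFFF0000) | (wire & 0x0000FFFF);
}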
3742 static int receive_extralen(struct dlm_message *ms)
3744 return (ms->m_header.h_length - sizeof(struct dlm_message));
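
/*
 * Sketch (assumed helper): the fixed struct dlm_message is followed
 * directly by its variable payload (resource name or LVB) in m_extra[],
 * so receive_extralen() above recovers the payload size from the header
 * length, and a payload exists only when that length exceeds the fixed
 * size.
 */
static inline int example_has_extra(struct dlm_message *ms)
{
	return ms->m_header.h_length > sizeof(struct dlm_message);
}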
3747 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3748 struct dlm_message *ms)
3752 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3753 if (!lkb->lkb_lvbptr)
3754 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3755 if (!lkb->lkb_lvbptr)
3757 len = receive_extralen(ms);
3758 if (len > DLM_RESNAME_MAXLEN)
3759 len = DLM_RESNAME_MAXLEN;
3760 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3765 static void fake_bastfn(void *astparam, int mode)
3767 log_print("fake_bastfn should not be called");
3770 static void fake_astfn(void *astparam)
3772 log_print("fake_astfn should not be called");
3775 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3776 struct dlm_message *ms)
3778 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3779 lkb->lkb_ownpid = ms->m_pid;
3780 lkb->lkb_remid = ms->m_lkid;
3781 lkb->lkb_grmode = DLM_LOCK_IV;
3782 lkb->lkb_rqmode = ms->m_rqmode;
3784 lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3785 lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3787 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3788 /* lkb was just created so there won't be an lvb yet */
3789 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3790 if (!lkb->lkb_lvbptr)
3797 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3798 struct dlm_message *ms)
3800 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3803 if (receive_lvb(ls, lkb, ms))
3806 lkb->lkb_rqmode = ms->m_rqmode;
3807 lkb->lkb_lvbseq = ms->m_lvbseq;
3812 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3813 struct dlm_message *ms)
3815 if (receive_lvb(ls, lkb, ms))
3820 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3821 uses to send a reply and that the remote end uses to process the reply. */
3823 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3825 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3826 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3827 lkb->lkb_remid = ms->m_lkid;
3830 /* This is called after the rsb is locked so that we can safely inspect
3831 fields in the lkb. */
3833 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3835 int from = ms->m_header.h_nodeid;
3838 switch (ms->m_type) {
3839 case DLM_MSG_CONVERT:
3840 case DLM_MSG_UNLOCK:
3841 case DLM_MSG_CANCEL:
3842 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3846 case DLM_MSG_CONVERT_REPLY:
3847 case DLM_MSG_UNLOCK_REPLY:
3848 case DLM_MSG_CANCEL_REPLY:
3851 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3855 case DLM_MSG_REQUEST_REPLY:
3856 if (!is_process_copy(lkb))
3858 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
3867 log_error(lkb->lkb_resource->res_ls,
3868 "ignore invalid message %d from %d %x %x %x %d",
3869 ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
3870 lkb->lkb_flags, lkb->lkb_nodeid);
3874 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
3876 struct dlm_lkb *lkb;
3881 from_nodeid = ms->m_header.h_nodeid;
3883 error = create_lkb(ls, &lkb);
3887 receive_flags(lkb, ms);
3888 lkb->lkb_flags |= DLM_IFL_MSTCPY;
3889 error = receive_request_args(ls, lkb, ms);
3895 /* The dir node is the authority on whether we are the master
3896 for this rsb or not, so if the master sends us a request, we should
3897 recreate the rsb if we've destroyed it. This race happens when we
3898 send a remove message to the dir node at the same time that the dir
3899 node sends us a request for the rsb. */
3901 namelen = receive_extralen(ms);
3903 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
3904 R_RECEIVE_REQUEST, &r);
3912 if (r->res_master_nodeid != dlm_our_nodeid()) {
3913 error = validate_master_nodeid(ls, r, from_nodeid);
3923 error = do_request(r, lkb);
3924 send_request_reply(r, lkb, error);
3925 do_request_effects(r, lkb, error);
3930 if (error == -EINPROGRESS)
3937 /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
3938 and do this receive_request again from process_lookup_list once
3939 we get the lookup reply. This would avoid many repeated
3940 ENOTBLK request failures when the lookup reply designating us
3941 as master is delayed. */
3943 /* We could repeatedly return -EBADR here if our send_remove() is
3944 delayed in being sent/arriving/being processed on the dir node.
3945 Another node would repeatedly look up the master, and the dir
3946 node would continue returning our nodeid until our send_remove took effect. */
3949 if (error != -ENOTBLK) {
3950 log_limit(ls, "receive_request %x from %d %d",
3951 ms->m_lkid, from_nodeid, error);
3954 setup_stub_lkb(ls, ms);
3955 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3959 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
3961 struct dlm_lkb *lkb;
3963 int error, reply = 1;
3965 error = find_lkb(ls, ms->m_remid, &lkb);
3969 if (lkb->lkb_remid != ms->m_lkid) {
3970 log_error(ls, "receive_convert %x remid %x recover_seq %llu "
3971 "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
3972 (unsigned long long)lkb->lkb_recover_seq,
3973 ms->m_header.h_nodeid, ms->m_lkid);
3978 r = lkb->lkb_resource;
3983 error = validate_message(lkb, ms);
3987 receive_flags(lkb, ms);
3989 error = receive_convert_args(ls, lkb, ms);
3991 send_convert_reply(r, lkb, error);
3995 reply = !down_conversion(lkb);
3997 error = do_convert(r, lkb);
3999 send_convert_reply(r, lkb, error);
4000 do_convert_effects(r, lkb, error);
4008 setup_stub_lkb(ls, ms);
4009 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4013 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4015 struct dlm_lkb *lkb;
4019 error = find_lkb(ls, ms->m_remid, &lkb);
4023 if (lkb->lkb_remid != ms->m_lkid) {
4024 log_error(ls, "receive_unlock %x remid %x remote %d %x",
4025 lkb->lkb_id, lkb->lkb_remid,
4026 ms->m_header.h_nodeid, ms->m_lkid);
4031 r = lkb->lkb_resource;
4036 error = validate_message(lkb, ms);
4040 receive_flags(lkb, ms);
4042 error = receive_unlock_args(ls, lkb, ms);
4044 send_unlock_reply(r, lkb, error);
4048 error = do_unlock(r, lkb);
4049 send_unlock_reply(r, lkb, error);
4050 do_unlock_effects(r, lkb, error);
4058 setup_stub_lkb(ls, ms);
4059 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4063 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4065 struct dlm_lkb *lkb;
4069 error = find_lkb(ls, ms->m_remid, &lkb);
4073 receive_flags(lkb, ms);
4075 r = lkb->lkb_resource;
4080 error = validate_message(lkb, ms);
4084 error = do_cancel(r, lkb);
4085 send_cancel_reply(r, lkb, error);
4086 do_cancel_effects(r, lkb, error);
4094 setup_stub_lkb(ls, ms);
4095 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4099 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4101 struct dlm_lkb *lkb;
4105 error = find_lkb(ls, ms->m_remid, &lkb);
4109 r = lkb->lkb_resource;
4114 error = validate_message(lkb, ms);
4118 receive_flags_reply(lkb, ms);
4119 if (is_altmode(lkb))
4120 munge_altmode(lkb, ms);
4121 grant_lock_pc(r, lkb, ms);
4122 queue_cast(r, lkb, 0);
4130 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4132 struct dlm_lkb *lkb;
4136 error = find_lkb(ls, ms->m_remid, &lkb);
4140 r = lkb->lkb_resource;
4145 error = validate_message(lkb, ms);
4149 queue_bast(r, lkb, ms->m_bastmode);
4150 lkb->lkb_highbast = ms->m_bastmode;
4158 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4160 int len, error, ret_nodeid, from_nodeid, our_nodeid;
4162 from_nodeid = ms->m_header.h_nodeid;
4163 our_nodeid = dlm_our_nodeid();
4165 len = receive_extralen(ms);
4167 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4170 /* Optimization: we're master so treat lookup as a request */
4171 if (!error && ret_nodeid == our_nodeid) {
4172 receive_request(ls, ms);
4175 send_lookup_reply(ls, ms, ret_nodeid, error);
4178 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4180 char name[DLM_RESNAME_MAXLEN+1];
4183 int rv, len, dir_nodeid, from_nodeid;
4185 from_nodeid = ms->m_header.h_nodeid;
4187 len = receive_extralen(ms);
4189 if (len > DLM_RESNAME_MAXLEN) {
4190 log_error(ls, "receive_remove from %d bad len %d", from_nodeid, len);
4195 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4196 if (dir_nodeid != dlm_our_nodeid()) {
4197 log_error(ls, "receive_remove from %d bad nodeid %d",
4198 from_nodeid, dir_nodeid);
4202 /* Look for name on rsbtbl.toss, if it's there, kill it.
4203 If it's on rsbtbl.keep, it's being used, and we should ignore this
4204 message. This is an expected race between the dir node sending a
4205 request to the master node at the same time as the master node sends
4206 a remove to the dir node. The resolution to that race is for the
4207 dir node to ignore the remove message, and the master node to
4208 recreate the master rsb when it gets a request from the dir node for
4209 an rsb it doesn't have. */
4211 memset(name, 0, sizeof(name));
4212 memcpy(name, ms->m_extra, len);
4214 hash = jhash(name, len, 0);
4215 b = hash & (ls->ls_rsbtbl_size - 1);
4217 spin_lock(&ls->ls_rsbtbl[b].lock);
4219 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4221 /* verify the rsb is on keep list per comment above */
4222 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4224 /* should not happen */
4225 log_error(ls, "receive_remove from %d not found %s", from_nodeid, name);
4227 spin_unlock(&ls->ls_rsbtbl[b].lock);
4230 if (r->res_master_nodeid != from_nodeid) {
4231 /* should not happen */
4232 log_error(ls, "receive_remove keep from %d master %d",
4233 from_nodeid, r->res_master_nodeid);
4235 spin_unlock(&ls->ls_rsbtbl[b].lock);
4239 log_debug(ls, "receive_remove from %d master %d first %x %s",
4240 from_nodeid, r->res_master_nodeid, r->res_first_lkid, r->res_name);
4242 spin_unlock(&ls->ls_rsbtbl[b].lock);
4246 if (r->res_master_nodeid != from_nodeid) {
4247 log_error(ls, "receive_remove toss from %d master %d",
4248 from_nodeid, r->res_master_nodeid);
4250 spin_unlock(&ls->ls_rsbtbl[b].lock);
4254 if (kref_put(&r->res_ref, kill_rsb)) {
4255 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4256 spin_unlock(&ls->ls_rsbtbl[b].lock);
4259 log_error(ls, "receive_remove from %d rsb ref error", from_nodeid);
4262 spin_unlock(&ls->ls_rsbtbl[b].lock);
4266 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4268 do_purge(ls, ms->m_nodeid, ms->m_pid);
4271 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4273 struct dlm_lkb *lkb;
4275 int error, mstype, result;
4276 int from_nodeid = ms->m_header.h_nodeid;
4278 error = find_lkb(ls, ms->m_remid, &lkb);
4282 r = lkb->lkb_resource;
4286 error = validate_message(lkb, ms);
4290 mstype = lkb->lkb_wait_type;
4291 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4293 log_error(ls, "receive_request_reply %x remote %d %x result %d",
4294 lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4299 /* Optimization: the dir node was also the master, so it took our
4300 lookup as a request and sent request reply instead of lookup reply */
4301 if (mstype == DLM_MSG_LOOKUP) {
4302 r->res_master_nodeid = from_nodeid;
4303 r->res_nodeid = from_nodeid;
4304 lkb->lkb_nodeid = from_nodeid;
4307 /* this is the value returned from do_request() on the master */
4308 result = ms->m_result;
4312 /* request would block (be queued) on remote master */
4313 queue_cast(r, lkb, -EAGAIN);
4314 confirm_master(r, -EAGAIN);
4315 unhold_lkb(lkb); /* undoes create_lkb() */
4320 /* request was queued or granted on remote master */
4321 receive_flags_reply(lkb, ms);
4322 lkb->lkb_remid = ms->m_lkid;
4323 if (is_altmode(lkb))
4324 munge_altmode(lkb, ms);
4326 add_lkb(r, lkb, DLM_LKSTS_WAITING);
4329 grant_lock_pc(r, lkb, ms);
4330 queue_cast(r, lkb, 0);
4332 confirm_master(r, result);
4337 /* find_rsb failed to find rsb or rsb wasn't master */
4338 log_limit(ls, "receive_request_reply %x from %d %d "
4339 "master %d dir %d first %x %s", lkb->lkb_id,
4340 from_nodeid, result, r->res_master_nodeid,
4341 r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4343 if (r->res_dir_nodeid != dlm_our_nodeid() &&
4344 r->res_master_nodeid != dlm_our_nodeid()) {
4345 /* cause _request_lock->set_master->send_lookup */
4346 r->res_master_nodeid = 0;
4348 lkb->lkb_nodeid = -1;
4351 if (is_overlap(lkb)) {
4352 /* we'll ignore error in cancel/unlock reply */
4353 queue_cast_overlap(r, lkb);
4354 confirm_master(r, result);
4355 unhold_lkb(lkb); /* undoes create_lkb() */
4357 _request_lock(r, lkb);
4359 if (r->res_master_nodeid == dlm_our_nodeid())
4360 confirm_master(r, 0);
4365 log_error(ls, "receive_request_reply %x error %d",
4366 lkb->lkb_id, result);
4369 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4370 log_debug(ls, "receive_request_reply %x result %d unlock",
4371 lkb->lkb_id, result);
4372 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4373 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4374 send_unlock(r, lkb);
4375 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4376 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4377 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4378 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4379 send_cancel(r, lkb);
4381 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4382 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4391 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4392 struct dlm_message *ms)
4394 /* this is the value returned from do_convert() on the master */
4395 switch (ms->m_result) {
4397 /* convert would block (be queued) on remote master */
4398 queue_cast(r, lkb, -EAGAIN);
4402 receive_flags_reply(lkb, ms);
4403 revert_lock_pc(r, lkb);
4404 queue_cast(r, lkb, -EDEADLK);
4408 /* convert was queued on remote master */
4409 receive_flags_reply(lkb, ms);
4410 if (is_demoted(lkb))
4413 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4418 /* convert was granted on remote master */
4419 receive_flags_reply(lkb, ms);
4420 if (is_demoted(lkb))
4422 grant_lock_pc(r, lkb, ms);
4423 queue_cast(r, lkb, 0);
4427 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4428 lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid, ms->m_result);
4435 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4437 struct dlm_rsb *r = lkb->lkb_resource;
4443 error = validate_message(lkb, ms);
4447 /* stub reply can happen with waiters_mutex held */
4448 error = remove_from_waiters_ms(lkb, ms);
4452 __receive_convert_reply(r, lkb, ms);
4458 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4460 struct dlm_lkb *lkb;
4463 error = find_lkb(ls, ms->m_remid, &lkb);
4467 _receive_convert_reply(lkb, ms);
4472 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4474 struct dlm_rsb *r = lkb->lkb_resource;
4480 error = validate_message(lkb, ms);
4484 /* stub reply can happen with waiters_mutex held */
4485 error = remove_from_waiters_ms(lkb, ms);
4489 /* this is the value returned from do_unlock() on the master */
4491 switch (ms->m_result) {
4493 receive_flags_reply(lkb, ms);
4494 remove_lock_pc(r, lkb);
4495 queue_cast(r, lkb, -DLM_EUNLOCK);
4500 log_error(r->res_ls, "receive_unlock_reply %x error %d",
4501 lkb->lkb_id, ms->m_result);
4508 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4510 struct dlm_lkb *lkb;
4513 error = find_lkb(ls, ms->m_remid, &lkb);
4517 _receive_unlock_reply(lkb, ms);
4522 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4524 struct dlm_rsb *r = lkb->lkb_resource;
4530 error = validate_message(lkb, ms);
4534 /* stub reply can happen with waiters_mutex held */
4535 error = remove_from_waiters_ms(lkb, ms);
4539 /* this is the value returned from do_cancel() on the master */
4541 switch (ms->m_result) {
4543 receive_flags_reply(lkb, ms);
4544 revert_lock_pc(r, lkb);
4545 queue_cast(r, lkb, -DLM_ECANCEL);
4550 log_error(r->res_ls, "receive_cancel_reply %x error %d",
4551 lkb->lkb_id, ms->m_result);
4558 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4560 struct dlm_lkb *lkb;
4563 error = find_lkb(ls, ms->m_remid, &lkb);
4567 _receive_cancel_reply(lkb, ms);
4572 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4574 struct dlm_lkb *lkb;
4576 int error, ret_nodeid;
4577 int do_lookup_list = 0;
4579 error = find_lkb(ls, ms->m_lkid, &lkb);
4581 log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4585 /* ms->m_result is the value returned by dlm_master_lookup on dir node
4586 FIXME: will a non-zero error ever be returned? */
4588 r = lkb->lkb_resource;
4592 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4596 ret_nodeid = ms->m_nodeid;
4598 /* We sometimes receive a request from the dir node for this
4599 rsb before we've received the dir node's lookup_reply for it.
4600 The request from the dir node implies we're the master, so we set
4601 ourselves as master in receive_request_reply, and verify here that
4602 we are indeed the master. */
4604 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4605 /* This should never happen */
4606 log_error(ls, "receive_lookup_reply %x from %d ret %d "
4607 "master %d dir %d our %d first %x %s",
4608 lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4609 r->res_master_nodeid, r->res_dir_nodeid,
4610 dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4613 if (ret_nodeid == dlm_our_nodeid()) {
4614 r->res_master_nodeid = ret_nodeid;
4617 r->res_first_lkid = 0;
4618 } else if (ret_nodeid == -1) {
4619 /* the remote node doesn't believe it's the dir node */
4620 log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4621 lkb->lkb_id, ms->m_header.h_nodeid);
4622 r->res_master_nodeid = 0;
4624 lkb->lkb_nodeid = -1;
4626 /* set_master() will set lkb_nodeid from r */
4627 r->res_master_nodeid = ret_nodeid;
4628 r->res_nodeid = ret_nodeid;
4631 if (is_overlap(lkb)) {
4632 log_debug(ls, "receive_lookup_reply %x unlock %x",
4633 lkb->lkb_id, lkb->lkb_flags);
4634 queue_cast_overlap(r, lkb);
4635 unhold_lkb(lkb); /* undoes create_lkb() */
4639 _request_lock(r, lkb);
4643 process_lookup_list(r);
4650 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4653 int error = 0, noent = 0;
4655 if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4656 log_limit(ls, "receive %d from non-member %d %x %x %d",
4657 ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4658 ms->m_remid, ms->m_result);
4662 switch (ms->m_type) {
4664 /* messages sent to a master node */
4666 case DLM_MSG_REQUEST:
4667 error = receive_request(ls, ms);
4670 case DLM_MSG_CONVERT:
4671 error = receive_convert(ls, ms);
4674 case DLM_MSG_UNLOCK:
4675 error = receive_unlock(ls, ms);
4678 case DLM_MSG_CANCEL:
4680 error = receive_cancel(ls, ms);
4683 /* messages sent from a master node (replies to above) */
4685 case DLM_MSG_REQUEST_REPLY:
4686 error = receive_request_reply(ls, ms);
4689 case DLM_MSG_CONVERT_REPLY:
4690 error = receive_convert_reply(ls, ms);
4693 case DLM_MSG_UNLOCK_REPLY:
4694 error = receive_unlock_reply(ls, ms);
4697 case DLM_MSG_CANCEL_REPLY:
4698 error = receive_cancel_reply(ls, ms);
4701 /* messages sent from a master node (only two types of async msg) */
4705 error = receive_grant(ls, ms);
4710 error = receive_bast(ls, ms);
4713 /* messages sent to a dir node */
4715 case DLM_MSG_LOOKUP:
4716 receive_lookup(ls, ms);
4719 case DLM_MSG_REMOVE:
4720 receive_remove(ls, ms);
4723 /* messages sent from a dir node (remove has no reply) */
4725 case DLM_MSG_LOOKUP_REPLY:
4726 receive_lookup_reply(ls, ms);
4729 /* other messages */
4732 receive_purge(ls, ms);
4736 log_error(ls, "unknown message type %d", ms->m_type);
4740 * When checking for ENOENT, we're checking the result of
4741 * find_lkb(m_remid):
4743 * The lock id referenced in the message wasn't found. This may
4744 * happen in normal usage for the async messages and cancel, so
4745 * only use log_debug for them.
4747 * Some errors are expected and normal.
4750 if (error == -ENOENT && noent) {
4751 log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4752 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4753 ms->m_lkid, saved_seq);
4754 } else if (error == -ENOENT) {
4755 log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4756 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4757 ms->m_lkid, saved_seq);
4759 if (ms->m_type == DLM_MSG_CONVERT)
4760 dlm_dump_rsb_hash(ls, ms->m_hash);
4763 if (error == -EINVAL) {
4764 log_error(ls, "receive %d inval from %d lkid %x remid %x saved_seq %u",
4766 ms->m_type, ms->m_header.h_nodeid,
4767 ms->m_lkid, ms->m_remid, saved_seq);
4771 /* If the lockspace is in recovery mode (locking stopped), then normal
4772 messages are saved on the requestqueue for processing after recovery is
4773 done. When not in recovery mode, we wait for dlm_recoverd to drain saved
4774 messages off the requestqueue before we process new ones. This occurs right
4775 after recovery completes when we transition from saving all messages on
4776 requestqueue, to processing all the saved messages, to processing new
4777 messages as they arrive. */
4779 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4782 if (dlm_locking_stopped(ls)) {
4783 /* If we were a member of this lockspace, left, and rejoined,
4784 other nodes may still be sending us messages from the
4785 lockspace generation before we left. */
4786 if (!ls->ls_generation) {
4787 log_limit(ls, "receive %d from %d ignore old gen",
4788 ms->m_type, nodeid);
4792 dlm_add_requestqueue(ls, nodeid, ms);
4794 dlm_wait_requestqueue(ls);
4795 _receive_message(ls, ms, 0);
4799 /* This is called by dlm_recoverd to process messages that were saved on
4800 the requestqueue. */
4802 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
4805 _receive_message(ls, ms, saved_seq);
4808 /* This is called by the midcomms layer when something is received for
4809 the lockspace. It could be either a MSG (normal message sent as part of
4810 standard locking activity) or an RCOM (recovery message sent as part of
4811 lockspace recovery). */
4813 void dlm_receive_buffer(union dlm_packet *p, int nodeid)
4815 struct dlm_header *hd = &p->header;
4819 switch (hd->h_cmd) {
4821 dlm_message_in(&p->message);
4822 type = p->message.m_type;
4825 dlm_rcom_in(&p->rcom);
4826 type = p->rcom.rc_type;
4829 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
4833 if (hd->h_nodeid != nodeid) {
4834 log_print("invalid h_nodeid %d from %d lockspace %x",
4835 hd->h_nodeid, nodeid, hd->h_lockspace);
4839 ls = dlm_find_lockspace_global(hd->h_lockspace);
4841 if (dlm_config.ci_log_debug) {
4842 printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
4843 "%u from %d cmd %d type %d\n",
4844 hd->h_lockspace, nodeid, hd->h_cmd, type);
4847 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
4848 dlm_send_ls_not_ready(nodeid, &p->rcom);
4852 /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
4853 be inactive (in this ls) before transitioning to recovery mode */
4855 down_read(&ls->ls_recv_active);
4856 if (hd->h_cmd == DLM_MSG)
4857 dlm_receive_message(ls, &p->message, nodeid);
4859 dlm_receive_rcom(ls, &p->rcom, nodeid);
4860 up_read(&ls->ls_recv_active);
4862 dlm_put_lockspace(ls);
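
/*
 * Sketch of the write side of ls_recv_active (assumed; the real
 * dlm_ls_stop() lives in another file): taking the rwsem for write
 * blocks until every reader above has released it, which is how
 * recovery waits out in-flight receives.
 */
static void example_quiesce_recv(struct dlm_ls *ls)
{
	down_write(&ls->ls_recv_active);
	/* ... transition the lockspace to recovery mode here ... */
	up_write(&ls->ls_recv_active);
}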
4865 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
4866 struct dlm_message *ms_stub)
4868 if (middle_conversion(lkb)) {
4870 memset(ms_stub, 0, sizeof(struct dlm_message));
4871 ms_stub->m_flags = DLM_IFL_STUB_MS;
4872 ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
4873 ms_stub->m_result = -EINPROGRESS;
4874 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4875 _receive_convert_reply(lkb, ms_stub);
4877 /* Same special case as in receive_rcom_lock_args() */
4878 lkb->lkb_grmode = DLM_LOCK_IV;
4879 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
4882 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
4883 lkb->lkb_flags |= DLM_IFL_RESEND;
4886 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
4887 conversions are async; there's no reply from the remote master */
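
/*
 * Illustrative helper (assumed; recover_convert_waiter() above and
 * dlm_recover_waiters_pre() below open-code this pattern): build a stub
 * message that fakes a reply from a failed node.
 */
static void example_fake_reply(struct dlm_lkb *lkb, struct dlm_message *ms_stub,
			       int type, int result)
{
	memset(ms_stub, 0, sizeof(struct dlm_message));
	ms_stub->m_flags = DLM_IFL_STUB_MS;
	ms_stub->m_type = type;
	ms_stub->m_result = result;
	ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
}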
4890 /* A waiting lkb needs recovery if the master node has failed, or
4891 the master node is changing (only when no directory is used) */
4893 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
4896 if (dlm_no_directory(ls))
4899 if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
4905 /* Recovery for locks that are waiting for replies from nodes that are now
4906 gone. We can just complete unlocks and cancels by faking a reply from the
4907 dead node. Requests and up-conversions we flag to be resent after
4908 recovery. Down-conversions can just be completed with a fake reply like
4909 unlocks. Conversions between PR and CW need special attention. */
4911 void dlm_recover_waiters_pre(struct dlm_ls *ls)
4913 struct dlm_lkb *lkb, *safe;
4914 struct dlm_message *ms_stub;
4915 int wait_type, stub_unlock_result, stub_cancel_result;
4918 ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL);
4920 log_error(ls, "dlm_recover_waiters_pre no mem");
4924 mutex_lock(&ls->ls_waiters_mutex);
4926 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
4928 dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
4930 /* exclude debug messages about unlocks because there can be so
4931 many and they aren't very interesting */
4933 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
4934 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
4935 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
4939 lkb->lkb_id, lkb->lkb_remid, lkb->lkb_wait_type, lkb->lkb_resource->res_nodeid,
4941 lkb->lkb_nodeid, lkb->lkb_wait_nodeid, dir_nodeid);
4945 /* all outstanding lookups, regardless of destination, will be
4946 resent after recovery is done */
4948 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
4949 lkb->lkb_flags |= DLM_IFL_RESEND;
4953 if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
4956 wait_type = lkb->lkb_wait_type;
4957 stub_unlock_result = -DLM_EUNLOCK;
4958 stub_cancel_result = -DLM_ECANCEL;
4960 /* Main reply may have been received leaving a zero wait_type,
4961 but a reply for the overlapping op may not have been
4962 received. In that case we need to fake the appropriate
4963 reply for the overlap op. */
4966 if (is_overlap_cancel(lkb)) {
4967 wait_type = DLM_MSG_CANCEL;
4968 if (lkb->lkb_grmode == DLM_LOCK_IV)
4969 stub_cancel_result = 0;
4971 if (is_overlap_unlock(lkb)) {
4972 wait_type = DLM_MSG_UNLOCK;
4973 if (lkb->lkb_grmode == DLM_LOCK_IV)
4974 stub_unlock_result = -ENOENT;
4977 log_debug(ls, "rwpre overlap %x %x %d %d %d",
4978 lkb->lkb_id, lkb->lkb_flags, wait_type,
4979 stub_cancel_result, stub_unlock_result);
4982 switch (wait_type) {
4984 case DLM_MSG_REQUEST:
4985 lkb->lkb_flags |= DLM_IFL_RESEND;
4988 case DLM_MSG_CONVERT:
4989 recover_convert_waiter(ls, lkb, ms_stub);
4992 case DLM_MSG_UNLOCK:
4994 memset(ms_stub, 0, sizeof(struct dlm_message));
4995 ms_stub->m_flags = DLM_IFL_STUB_MS;
4996 ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
4997 ms_stub->m_result = stub_unlock_result;
4998 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
4999 _receive_unlock_reply(lkb, ms_stub);
5003 case DLM_MSG_CANCEL:
5005 memset(ms_stub, 0, sizeof(struct dlm_message));
5006 ms_stub->m_flags = DLM_IFL_STUB_MS;
5007 ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
5008 ms_stub->m_result = stub_cancel_result;
5009 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5010 _receive_cancel_reply(lkb, ms_stub);
5015 log_error(ls, "invalid lkb wait_type %d %d",
5016 lkb->lkb_wait_type, wait_type);
5020 mutex_unlock(&ls->ls_waiters_mutex);
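/* Return the first waiter flagged RESEND, or NULL if none remain; the
   elided loop body is assumed to take a reference on the lkb (hold_lkb)
   that the caller drops when it finishes with it. */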
5024 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5026 struct dlm_lkb *lkb;
5029 mutex_lock(&ls->ls_waiters_mutex);
5030 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
5031 if (lkb->lkb_flags & DLM_IFL_RESEND) {
5037 mutex_unlock(&ls->ls_waiters_mutex);
/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
   master or dir-node for r.  Processing the lkb may result in it being
   placed back on waiters. */
5048 /* We do this after normal locking has been enabled and any saved messages
5049 (in requestqueue) have been processed. We should be confident that at
5050 this point we won't get or process a reply to any of these waiting
5051 operations. But, new ops may be coming in on the rsbs/locks here from
5052 userspace or remotely. */
/* there may have been an overlap unlock/cancel prior to recovery or after
   recovery.  if before, the lkb may still have a positive wait_count; if
   after, the overlap flag would just have been set and nothing new sent.
   we can be confident here that any replies to either the initial op or
   overlap ops prior to recovery have been received. */
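/* A rough sketch of where the pre/post halves sit in the recovery
   sequence (assumed ordering, driven by the recovery thread):

	dlm_recover_waiters_pre()     locking stopped: fake/flag waiters
	...masters and locks rebuilt via rcom messages...
	...locking re-enabled, saved requestqueue messages processed...
	dlm_recover_waiters_post()    resend/complete the flagged ops */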
5060 int dlm_recover_waiters_post(struct dlm_ls *ls)
5062 struct dlm_lkb *lkb;
5064 int error = 0, mstype, err, oc, ou;
5067 if (dlm_locking_stopped(ls)) {
5068 log_debug(ls, "recover_waiters_post aborted");
5073 lkb = find_resend_waiter(ls);
5077 r = lkb->lkb_resource;
5081 mstype = lkb->lkb_wait_type;
5082 oc = is_overlap_cancel(lkb);
5083 ou = is_overlap_unlock(lkb);
5086 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5087 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5088 "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5089 r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5090 dlm_dir_nodeid(r), oc, ou);
5092 /* At this point we assume that we won't get a reply to any
5093 previous op or overlap op on this lock. First, do a big
5094 remove_from_waiters() for all previous ops. */
5096 lkb->lkb_flags &= ~DLM_IFL_RESEND;
5097 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5098 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5099 lkb->lkb_wait_type = 0;
5100 lkb->lkb_wait_count = 0;
5101 mutex_lock(&ls->ls_waiters_mutex);
5102 list_del_init(&lkb->lkb_wait_reply);
5103 mutex_unlock(&ls->ls_waiters_mutex);
5104 unhold_lkb(lkb); /* for waiters list */
5107 /* do an unlock or cancel instead of resending */
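		/* This branch is taken when an overlapping unlock (ou) or
		   cancel (oc) arrived while the original op was outstanding;
		   complete it as the overlap asked rather than resending. */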
5109 case DLM_MSG_LOOKUP:
5110 case DLM_MSG_REQUEST:
			queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
						-DLM_ECANCEL);
5113 unhold_lkb(lkb); /* undoes create_lkb() */
5115 case DLM_MSG_CONVERT:
5117 queue_cast(r, lkb, -DLM_ECANCEL);
5119 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5120 _unlock_lock(r, lkb);
5128 case DLM_MSG_LOOKUP:
5129 case DLM_MSG_REQUEST:
5130 _request_lock(r, lkb);
5132 confirm_master(r, 0);
5134 case DLM_MSG_CONVERT:
5135 _convert_lock(r, lkb);
5143 log_error(ls, "waiter %x msg %d r_nodeid %d "
5144 "dir_nodeid %d overlap %d %d",
5145 lkb->lkb_id, mstype, r->res_nodeid,
5146 dlm_dir_nodeid(r), oc, ou);
5156 static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5157 struct list_head *list)
5159 struct dlm_lkb *lkb, *safe;
5161 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5162 if (!is_master_copy(lkb))
5165 /* don't purge lkbs we've added in recover_master_copy for
5166 the current recovery seq */
5168 if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5173 /* this put should free the lkb */
5174 if (!dlm_put_lkb(lkb))
5175 log_error(ls, "purged mstcpy lkb not released");
5179 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5181 struct dlm_ls *ls = r->res_ls;
5183 purge_mstcpy_list(ls, r, &r->res_grantqueue);
5184 purge_mstcpy_list(ls, r, &r->res_convertqueue);
5185 purge_mstcpy_list(ls, r, &r->res_waitqueue);
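/* A master-copy (MSTCPY) lkb is the master's record of a lock held by a
   process on another node.  A minimal sketch of the test used above,
   assuming it keys off the flag set in receive_rcom_lock_args() below:

	static inline int is_master_copy(struct dlm_lkb *lkb)
	{
		return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
	}
*/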
5188 static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5189 struct list_head *list,
5190 int nodeid_gone, unsigned int *count)
5192 struct dlm_lkb *lkb, *safe;
5194 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5195 if (!is_master_copy(lkb))
5198 if ((lkb->lkb_nodeid == nodeid_gone) ||
5199 dlm_is_removed(ls, lkb->lkb_nodeid)) {
5203 /* this put should free the lkb */
5204 if (!dlm_put_lkb(lkb))
5205 log_error(ls, "purged dead lkb not released");
5207 rsb_set_flag(r, RSB_RECOVER_GRANT);
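			/* purging a dead holder may leave previously blocked
			   locks grantable, so flag the rsb for
			   dlm_recover_grant() */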
5214 /* Get rid of locks held by nodes that are gone. */
5216 void dlm_recover_purge(struct dlm_ls *ls)
5219 struct dlm_member *memb;
5220 int nodes_count = 0;
5221 int nodeid_gone = 0;
5222 unsigned int lkb_count = 0;
5224 /* cache one removed nodeid to optimize the common
5225 case of a single node removed */
5227 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5229 nodeid_gone = memb->nodeid;
5235 down_write(&ls->ls_root_sem);
5236 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5240 purge_dead_list(ls, r, &r->res_grantqueue,
5241 nodeid_gone, &lkb_count);
5242 purge_dead_list(ls, r, &r->res_convertqueue,
5243 nodeid_gone, &lkb_count);
5244 purge_dead_list(ls, r, &r->res_waitqueue,
5245 nodeid_gone, &lkb_count);
5251 up_write(&ls->ls_root_sem);
5254 log_debug(ls, "dlm_recover_purge %u locks for %u nodes",
5255 lkb_count, nodes_count);
5258 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5263 spin_lock(&ls->ls_rsbtbl[bucket].lock);
5264 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5265 r = rb_entry(n, struct dlm_rsb, res_hashnode);
5267 if (!rsb_flag(r, RSB_RECOVER_GRANT))
5269 rsb_clear_flag(r, RSB_RECOVER_GRANT);
5273 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5276 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
/*
 * Attempt to grant locks on resources that we are the master of.
 * Locks may have become grantable during recovery because locks
 * from departed nodes have been purged (or not rebuilt), allowing
 * previously blocked locks to now be granted.  The subset of rsb's
 * we are interested in are those with lkb's on either the convert or
 * waiting queues.
 *
 * Simplest would be to go through each master rsb and check for non-empty
 * convert or waiting queues, and attempt to grant on those rsbs.
 * Checking the queues requires lock_rsb, though, for which we'd need
 * to release the rsbtbl lock.  This would make iterating through all
 * rsb's very inefficient.  So, we rely on earlier recovery routines
 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
 * locks on.
 */
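/* The loop below therefore pulls one flagged rsb at a time, so the
   rsbtbl bucket lock is never held across lock_rsb; in outline
   (simplified from the partially elided body):

	for (;;) {
		r = find_grant_rsb(ls, bucket);
		if (!r) {
			if (bucket == ls->ls_rsbtbl_size - 1)
				break;
			bucket++;
			continue;
		}
		rsb_count++;
		count = 0;
		lock_rsb(r);
		grant_pending_locks(r, &count);
		lkb_count += count;
		confirm_master(r, 0);
		unlock_rsb(r);
		put_rsb(r);
	}
*/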
5297 void dlm_recover_grant(struct dlm_ls *ls)
5301 unsigned int count = 0;
5302 unsigned int rsb_count = 0;
5303 unsigned int lkb_count = 0;
5306 r = find_grant_rsb(ls, bucket);
5308 if (bucket == ls->ls_rsbtbl_size - 1)
5316 grant_pending_locks(r, &count);
5318 confirm_master(r, 0);
5325 log_debug(ls, "dlm_recover_grant %u locks on %u resources",
5326 lkb_count, rsb_count);
5329 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5332 struct dlm_lkb *lkb;
5334 list_for_each_entry(lkb, head, lkb_statequeue) {
5335 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5341 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5344 struct dlm_lkb *lkb;
5346 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5349 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5352 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5358 /* needs at least dlm_rcom + rcom_lock */
5359 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5360 struct dlm_rsb *r, struct dlm_rcom *rc)
5362 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5364 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
5365 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5366 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5367 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5368 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5369 lkb->lkb_flags |= DLM_IFL_MSTCPY;
5370 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5371 lkb->lkb_rqmode = rl->rl_rqmode;
5372 lkb->lkb_grmode = rl->rl_grmode;
	/* don't set lkb_status because add_lkb wants to do that itself */
5375 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5376 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5378 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5379 int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
5380 sizeof(struct rcom_lock);
5381 if (lvblen > ls->ls_lvblen)
5383 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5384 if (!lkb->lkb_lvbptr)
5386 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5389 /* Conversions between PR and CW (middle modes) need special handling.
5390 The real granted mode of these converting locks cannot be determined
5391 until all locks have been rebuilt on the rsb (recover_conversion) */
5393 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5394 middle_conversion(lkb)) {
5395 rl->rl_status = DLM_LKSTS_CONVERT;
5396 lkb->lkb_grmode = DLM_LOCK_IV;
5397 rsb_set_flag(r, RSB_RECOVER_CONVERT);
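		/* Same special case as recover_convert_waiter() above:
		   grmode is forced back to IV so that, once all locks on
		   the rsb have been rebuilt, recovery can deduce the real
		   granted mode for PR/CW converters. */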
5403 /* This lkb may have been recovered in a previous aborted recovery so we need
5404 to check if the rsb already has an lkb with the given remote nodeid/lkid.
5405 If so we just send back a standard reply. If not, we create a new lkb with
5406 the given values and send back our lkid. We send back our lkid by sending
5407 back the rcom_lock struct we got but with the remid field filled in. */
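/* Sketch of the rcom lock exchange this implements (simplified):

	lock holder (process copy)          new master
	--------------------------          ----------
	dlm_send_rcom_lock(r, lkb)  ----->  dlm_recover_master_copy()
	                                      search_remid()/create_lkb()
	                                      rl_remid = master's lkid
	dlm_recover_process_copy()  <-----  (rcom reply)
	  lkb->lkb_remid = rl_remid */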
5409 /* needs at least dlm_rcom + rcom_lock */
5410 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5412 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5414 struct dlm_lkb *lkb;
5416 int from_nodeid = rc->rc_header.h_nodeid;
5419 if (rl->rl_parent_lkid) {
5420 error = -EOPNOTSUPP;
5424 remid = le32_to_cpu(rl->rl_lkid);
5426 /* In general we expect the rsb returned to be R_MASTER, but we don't
5427 have to require it. Recovery of masters on one node can overlap
5428 recovery of locks on another node, so one node can send us MSTCPY
5429 locks before we've made ourselves master of this rsb. We can still
5430 add new MSTCPY locks that we receive here without any harm; when
5431 we make ourselves master, dlm_recover_masters() won't touch the
5432 MSTCPY locks we've received early. */
5434 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5435 from_nodeid, R_RECEIVE_RECOVER, &r);
5441 if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5442 log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5443 from_nodeid, remid);
5448 lkb = search_remid(r, from_nodeid, remid);
5454 error = create_lkb(ls, &lkb);
5458 error = receive_rcom_lock_args(ls, lkb, r, rc);
5465 add_lkb(r, lkb, rl->rl_status);
5467 ls->ls_recover_locks_in++;
5469 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5470 rsb_set_flag(r, RSB_RECOVER_GRANT);
5473 /* this is the new value returned to the lock holder for
5474 saving in its process-copy lkb */
5475 rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5477 lkb->lkb_recover_seq = ls->ls_recover_seq;
5483 if (error && error != -EEXIST)
5484 log_debug(ls, "dlm_recover_master_copy remote %d %x error %d",
5485 from_nodeid, remid, error);
5486 rl->rl_result = cpu_to_le32(error);
5490 /* needs at least dlm_rcom + rcom_lock */
5491 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5493 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5495 struct dlm_lkb *lkb;
5496 uint32_t lkid, remid;
5499 lkid = le32_to_cpu(rl->rl_lkid);
5500 remid = le32_to_cpu(rl->rl_remid);
5501 result = le32_to_cpu(rl->rl_result);
5503 error = find_lkb(ls, lkid, &lkb);
5505 log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5506 lkid, rc->rc_header.h_nodeid, remid, result);
5510 r = lkb->lkb_resource;
5514 if (!is_process_copy(lkb)) {
5515 log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5516 lkid, rc->rc_header.h_nodeid, remid, result);
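	/* The new master's result is handled below: -EBADR means it is not
	   (yet) master of the rsb, so the lock is simply sent to it again;
	   -EEXIST means it already had a copy from an earlier, aborted
	   recovery.  For -EEXIST and 0, the remid it returned is saved in
	   our process-copy lkb. */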
		/* There's a chance the new master received our lock before
		   dlm_recover_master_reply(); this wouldn't happen if we did
		   a barrier between recover_masters and recover_locks. */
5530 log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5531 lkid, rc->rc_header.h_nodeid, remid, result);
5533 dlm_send_rcom_lock(r, lkb);
5537 lkb->lkb_remid = remid;
5540 log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5541 lkid, rc->rc_header.h_nodeid, remid, result);
5544 /* an ack for dlm_recover_locks() which waits for replies from
5545 all the locks it sends to new masters */
5546 dlm_recovered_lock(r);
5555 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5556 int mode, uint32_t flags, void *name, unsigned int namelen,
5557 unsigned long timeout_cs)
5559 struct dlm_lkb *lkb;
5560 struct dlm_args args;
5563 dlm_lock_recovery(ls);
5565 error = create_lkb(ls, &lkb);
5571 if (flags & DLM_LKF_VALBLK) {
5572 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5573 if (!ua->lksb.sb_lvbptr) {
5581 /* After ua is attached to lkb it will be freed by dlm_free_lkb().
5582 When DLM_IFL_USER is set, the dlm knows that this is a userspace
5583 lock and that lkb_astparam is the dlm_user_args structure. */
5585 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5586 fake_astfn, ua, fake_bastfn, &args);
5587 lkb->lkb_flags |= DLM_IFL_USER;
5594 error = request_lock(ls, lkb, name, namelen, &args);
5610 /* add this new lkb to the per-process list of locks */
5611 spin_lock(&ua->proc->locks_spin);
5613 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5614 spin_unlock(&ua->proc->locks_spin);
5616 dlm_unlock_recovery(ls);
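/* dlm_user_request() is the device-write counterpart of dlm_lock():
   user.c decodes a write() on the dlm misc device into these arguments,
   and completions come back through fake_astfn/fake_bastfn, which queue
   callbacks for the process to collect via device reads (assumed flow,
   inferred from the DLM_IFL_USER handling above). */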
5620 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5621 int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5622 unsigned long timeout_cs)
5624 struct dlm_lkb *lkb;
5625 struct dlm_args args;
5626 struct dlm_user_args *ua;
5629 dlm_lock_recovery(ls);
5631 error = find_lkb(ls, lkid, &lkb);
5635 /* user can change the params on its lock when it converts it, or
5636 add an lvb that didn't exist before */
5640 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5641 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5642 if (!ua->lksb.sb_lvbptr) {
5647 if (lvb_in && ua->lksb.sb_lvbptr)
5648 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5650 ua->xid = ua_tmp->xid;
5651 ua->castparam = ua_tmp->castparam;
5652 ua->castaddr = ua_tmp->castaddr;
5653 ua->bastparam = ua_tmp->bastparam;
5654 ua->bastaddr = ua_tmp->bastaddr;
5655 ua->user_lksb = ua_tmp->user_lksb;
5657 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5658 fake_astfn, ua, fake_bastfn, &args);
5662 error = convert_lock(ls, lkb, &args);
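	/* -EINPROGRESS, -EAGAIN and -EDEADLK are normal completions of an
	   async convert, reported to the caller through its ast callback,
	   so the (elided) body below is assumed to clear the error. */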
5664 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5669 dlm_unlock_recovery(ls);
5674 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5675 uint32_t flags, uint32_t lkid, char *lvb_in)
5677 struct dlm_lkb *lkb;
5678 struct dlm_args args;
5679 struct dlm_user_args *ua;
5682 dlm_lock_recovery(ls);
5684 error = find_lkb(ls, lkid, &lkb);
5690 if (lvb_in && ua->lksb.sb_lvbptr)
5691 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5692 if (ua_tmp->castparam)
5693 ua->castparam = ua_tmp->castparam;
5694 ua->user_lksb = ua_tmp->user_lksb;
5696 error = set_unlock_args(flags, ua, &args);
5700 error = unlock_lock(ls, lkb, &args);
5702 if (error == -DLM_EUNLOCK)
5704 /* from validate_unlock_args() */
5705 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
5710 spin_lock(&ua->proc->locks_spin);
5711 /* dlm_user_add_cb() may have already taken lkb off the proc list */
5712 if (!list_empty(&lkb->lkb_ownqueue))
5713 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
5714 spin_unlock(&ua->proc->locks_spin);
5718 dlm_unlock_recovery(ls);
5723 int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5724 uint32_t flags, uint32_t lkid)
5726 struct dlm_lkb *lkb;
5727 struct dlm_args args;
5728 struct dlm_user_args *ua;
5731 dlm_lock_recovery(ls);
5733 error = find_lkb(ls, lkid, &lkb);
5738 if (ua_tmp->castparam)
5739 ua->castparam = ua_tmp->castparam;
5740 ua->user_lksb = ua_tmp->user_lksb;
5742 error = set_unlock_args(flags, ua, &args);
5746 error = cancel_lock(ls, lkb, &args);
5748 if (error == -DLM_ECANCEL)
5750 /* from validate_unlock_args() */
5751 if (error == -EBUSY)
5756 dlm_unlock_recovery(ls);
5761 int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
5763 struct dlm_lkb *lkb;
5764 struct dlm_args args;
5765 struct dlm_user_args *ua;
5769 dlm_lock_recovery(ls);
5771 error = find_lkb(ls, lkid, &lkb);
5777 error = set_unlock_args(flags, ua, &args);
5781 /* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
5783 r = lkb->lkb_resource;
5787 error = validate_unlock_args(lkb, &args);
5790 lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
5792 error = _cancel_lock(r, lkb);
5797 if (error == -DLM_ECANCEL)
5799 /* from validate_unlock_args() */
5800 if (error == -EBUSY)
5805 dlm_unlock_recovery(ls);
5809 /* lkb's that are removed from the waiters list by revert are just left on the
5810 orphans list with the granted orphan locks, to be freed by purge */
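/* A lock requested with DLM_LKF_PERSISTENT is kept as an orphan when its
   owner exits (see del_proc_lock below); do_purge()/dlm_user_purge() are
   what eventually free such orphans. */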
5812 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
5814 struct dlm_args args;
5818 mutex_lock(&ls->ls_orphans_mutex);
5819 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
5820 mutex_unlock(&ls->ls_orphans_mutex);
5822 set_unlock_args(0, lkb->lkb_ua, &args);
5824 error = cancel_lock(ls, lkb, &args);
5825 if (error == -DLM_ECANCEL)
5830 /* The force flag allows the unlock to go ahead even if the lkb isn't granted.
5831 Regardless of what rsb queue the lock is on, it's removed and freed. */
5833 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
5835 struct dlm_args args;
5838 set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args);
5840 error = unlock_lock(ls, lkb, &args);
5841 if (error == -DLM_EUNLOCK)
/* We have to release the clear_proc_locks mutex before calling
   unlock_proc_lock() (which does lock_rsb) to avoid a deadlock with
   receiving a message that does lock_rsb followed by dlm_user_add_cb() */
5850 static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
5851 struct dlm_user_proc *proc)
5853 struct dlm_lkb *lkb = NULL;
5855 mutex_lock(&ls->ls_clear_proc_locks);
5856 if (list_empty(&proc->locks))
5859 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
5860 list_del_init(&lkb->lkb_ownqueue);
5862 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
5863 lkb->lkb_flags |= DLM_IFL_ORPHAN;
5865 lkb->lkb_flags |= DLM_IFL_DEAD;
5867 mutex_unlock(&ls->ls_clear_proc_locks);
5871 /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
5872 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
5873 which we clear here. */
5875 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
5876 list, and no more device_writes should add lkb's to proc->locks list; so we
5877 shouldn't need to take asts_spin or locks_spin here. this assumes that
5878 device reads/writes/closes are serialized -- FIXME: we may need to serialize
5881 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
5883 struct dlm_lkb *lkb, *safe;
5885 dlm_lock_recovery(ls);
5888 lkb = del_proc_lock(ls, proc);
5892 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
5893 orphan_proc_lock(ls, lkb);
5895 unlock_proc_lock(ls, lkb);
		/* this removes the reference for the proc->locks list
		   added by dlm_user_request; it may result in the lkb
		   being freed */
5904 mutex_lock(&ls->ls_clear_proc_locks);
5906 /* in-progress unlocks */
5907 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
5908 list_del_init(&lkb->lkb_ownqueue);
5909 lkb->lkb_flags |= DLM_IFL_DEAD;
5913 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
5914 memset(&lkb->lkb_callbacks, 0,
5915 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
5916 list_del_init(&lkb->lkb_cb_list);
5920 mutex_unlock(&ls->ls_clear_proc_locks);
5921 dlm_unlock_recovery(ls);
5924 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
5926 struct dlm_lkb *lkb, *safe;
5930 spin_lock(&proc->locks_spin);
5931 if (!list_empty(&proc->locks)) {
5932 lkb = list_entry(proc->locks.next, struct dlm_lkb,
5934 list_del_init(&lkb->lkb_ownqueue);
5936 spin_unlock(&proc->locks_spin);
5941 lkb->lkb_flags |= DLM_IFL_DEAD;
5942 unlock_proc_lock(ls, lkb);
5943 dlm_put_lkb(lkb); /* ref from proc->locks list */
5946 spin_lock(&proc->locks_spin);
5947 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
5948 list_del_init(&lkb->lkb_ownqueue);
5949 lkb->lkb_flags |= DLM_IFL_DEAD;
5952 spin_unlock(&proc->locks_spin);
5954 spin_lock(&proc->asts_spin);
5955 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
5956 memset(&lkb->lkb_callbacks, 0,
5957 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
5958 list_del_init(&lkb->lkb_cb_list);
5961 spin_unlock(&proc->asts_spin);
5964 /* pid of 0 means purge all orphans */
5966 static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
5968 struct dlm_lkb *lkb, *safe;
5970 mutex_lock(&ls->ls_orphans_mutex);
5971 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
5972 if (pid && lkb->lkb_ownpid != pid)
5974 unlock_proc_lock(ls, lkb);
5975 list_del_init(&lkb->lkb_ownqueue);
5978 mutex_unlock(&ls->ls_orphans_mutex);
5981 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
5983 struct dlm_message *ms;
5984 struct dlm_mhandle *mh;
5987 error = _create_message(ls, sizeof(struct dlm_message), nodeid,
5988 DLM_MSG_PURGE, &ms, &mh);
5991 ms->m_nodeid = nodeid;
5994 return send_message(mh, ms);
5997 int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
5998 int nodeid, int pid)
6002 if (nodeid != dlm_our_nodeid()) {
6003 error = send_purge(ls, nodeid, pid);
6005 dlm_lock_recovery(ls);
6006 if (pid == current->pid)
6007 purge_proc_locks(ls, proc);
6009 do_purge(ls, nodeid, pid);
6010 dlm_unlock_recovery(ls);
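/* A purge aimed at a remote nodeid travels as a DLM_MSG_PURGE message
   and is executed by do_purge() on the receiving node (via a
   receive_purge() handler, assumed); a local purge runs do_purge() or
   purge_proc_locks() directly under dlm_lock_recovery(). */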