1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
6 ** This copyrighted material is made available to anyone wishing to use,
7 ** modify, copy, or redistribute it subject to the terms and conditions
8 ** of the GNU General Public License v.2.
10 *******************************************************************************
11 ******************************************************************************/
13 /* Central locking logic has four stages:
33 Stage 1 (lock, unlock) is mainly about checking input args and
34 splitting into one of the four main operations:
36 dlm_lock = request_lock
37 dlm_lock+CONVERT = convert_lock
38 dlm_unlock = unlock_lock
39 dlm_unlock+CANCEL = cancel_lock
41 Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
42 provided to the next stage.
44 Stage 3, _xxxx_lock(), determines if the operation is local or remote.
45 When remote, it calls send_xxxx(), when local it calls do_xxxx().
47 Stage 4, do_xxxx(), is the guts of the operation. It manipulates the
48 given rsb and lkb and queues callbacks.
50 For remote operations, send_xxxx() results in the corresponding do_xxxx()
51 function being executed on the remote node. The connecting send/receive
52 calls on local (L) and remote (R) nodes:
54 L: send_xxxx() -> R: receive_xxxx()
56 L: receive_xxxx_reply() <- R: send_xxxx_reply()
58 #include <linux/types.h>
59 #include <linux/rbtree.h>
60 #include <linux/slab.h>
61 #include "dlm_internal.h"
62 #include <linux/dlm_device.h>
65 #include "requestqueue.h"
69 #include "lockspace.h"
74 #include "lvb_table.h"
78 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
79 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
80 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
81 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
82 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
83 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
84 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
85 static int send_remove(struct dlm_rsb *r);
86 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
87 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
88 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
89 struct dlm_message *ms);
90 static int receive_extralen(struct dlm_message *ms);
91 static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
92 static void del_timeout(struct dlm_lkb *lkb);
93 static void toss_rsb(struct kref *kref);
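/*
 * Illustrative sketch, not part of the original file: the stage-1 split
 * described in the header comment above. The helper name and string
 * results are hypothetical; the real dispatch lives in dlm_lock() and
 * dlm_unlock() further down in this file.
 */
static inline const char *example_stage1_op(int unlock, uint32_t flags)
{
	if (!unlock)
		return (flags & DLM_LKF_CONVERT) ? "convert_lock" : "request_lock";
	return (flags & DLM_LKF_CANCEL) ? "cancel_lock" : "unlock_lock";
}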
96 * Lock compatibility matrix - thanks Steve
97 * UN = Unlocked state. Not really a state, used as a flag
98 * PD = Padding. Used to make the matrix a nice power of two in size
99 * Other states are the same as the VMS DLM.
100 * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
103 static const int __dlm_compat_matrix[8][8] = {
104 /* UN NL CR CW PR PW EX PD */
105 {1, 1, 1, 1, 1, 1, 1, 0}, /* UN */
106 {1, 1, 1, 1, 1, 1, 1, 0}, /* NL */
107 {1, 1, 1, 1, 1, 1, 0, 0}, /* CR */
108 {1, 1, 1, 1, 0, 0, 0, 0}, /* CW */
109 {1, 1, 1, 0, 1, 0, 0, 0}, /* PR */
110 {1, 1, 1, 0, 0, 0, 0, 0}, /* PW */
111 {1, 1, 0, 0, 0, 0, 0, 0}, /* EX */
112 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
116 * This defines the direction of transfer of LVB data.
117 * Granted mode is the row; requested mode is the column.
118 * Usage: matrix[grmode+1][rqmode+1]
119 * 1 = LVB is returned to the caller
120 * 0 = LVB is written to the resource
121 * -1 = nothing happens to the LVB
124 const int dlm_lvb_operations[8][8] = {
125 /* UN NL CR CW PR PW EX PD*/
126 { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
127 { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
128 { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
129 { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
130 { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
131 { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
132 { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
133 { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
136 #define modes_compat(gr, rq) \
137 __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
139 int dlm_modes_compat(int mode1, int mode2)
141 return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
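/*
 * Illustrative sketch, not part of the original file: reading the two
 * tables above. PR and CW are incompatible (0), and for the LVB table
 * an up-conversion NL->EX returns the LVB to the caller (1) while a
 * down-conversion EX->NL writes the caller's LVB to the resource (0).
 * The helper name is hypothetical.
 */
static inline void example_matrix_usage(void)
{
	int compat = dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_CW);          /* 0 */
	int up = dlm_lvb_operations[DLM_LOCK_NL + 1][DLM_LOCK_EX + 1];    /* 1 */
	int down = dlm_lvb_operations[DLM_LOCK_EX + 1][DLM_LOCK_NL + 1];  /* 0 */
	(void)compat; (void)up; (void)down;
}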
145 * Compatibility matrix for conversions with QUECVT set.
146 * Granted mode is the row; requested mode is the column.
147 * Usage: matrix[grmode+1][rqmode+1]
150 static const int __quecvt_compat_matrix[8][8] = {
151 /* UN NL CR CW PR PW EX PD */
152 {0, 0, 0, 0, 0, 0, 0, 0}, /* UN */
153 {0, 0, 1, 1, 1, 1, 1, 0}, /* NL */
154 {0, 0, 0, 1, 1, 1, 1, 0}, /* CR */
155 {0, 0, 0, 0, 1, 1, 1, 0}, /* CW */
156 {0, 0, 0, 1, 0, 1, 1, 0}, /* PR */
157 {0, 0, 0, 0, 0, 0, 1, 0}, /* PW */
158 {0, 0, 0, 0, 0, 0, 0, 0}, /* EX */
159 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
162 void dlm_print_lkb(struct dlm_lkb *lkb)
164 printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
165 "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
166 lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
167 lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
168 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
169 (unsigned long long)lkb->lkb_recover_seq);
172 static void dlm_print_rsb(struct dlm_rsb *r)
174 printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
176 r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
177 r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
181 void dlm_dump_rsb(struct dlm_rsb *r)
187 printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
188 list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
189 printk(KERN_ERR "rsb lookup list\n");
190 list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
192 printk(KERN_ERR "rsb grant queue:\n");
193 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
195 printk(KERN_ERR "rsb convert queue:\n");
196 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
198 printk(KERN_ERR "rsb wait queue:\n");
199 list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
203 /* Threads cannot use the lockspace while it's being recovered */
205 static inline void dlm_lock_recovery(struct dlm_ls *ls)
207 down_read(&ls->ls_in_recovery);
210 void dlm_unlock_recovery(struct dlm_ls *ls)
212 up_read(&ls->ls_in_recovery);
215 int dlm_lock_recovery_try(struct dlm_ls *ls)
217 return down_read_trylock(&ls->ls_in_recovery);
220 static inline int can_be_queued(struct dlm_lkb *lkb)
222 return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
225 static inline int force_blocking_asts(struct dlm_lkb *lkb)
227 return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
230 static inline int is_demoted(struct dlm_lkb *lkb)
232 return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
235 static inline int is_altmode(struct dlm_lkb *lkb)
237 return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
240 static inline int is_granted(struct dlm_lkb *lkb)
242 return (lkb->lkb_status == DLM_LKSTS_GRANTED);
245 static inline int is_remote(struct dlm_rsb *r)
247 DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
248 return !!r->res_nodeid;
251 static inline int is_process_copy(struct dlm_lkb *lkb)
253 return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
256 static inline int is_master_copy(struct dlm_lkb *lkb)
258 return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
261 static inline int middle_conversion(struct dlm_lkb *lkb)
263 if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
264 (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
269 static inline int down_conversion(struct dlm_lkb *lkb)
271 return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
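/* Note (added): per the compatibility matrix above, PR and CW block each
   other even though CW has the lower numeric value, so a PR<->CW
   conversion is neither a clean up- nor down-conversion; these "middle"
   conversions are excluded from the down-conversion fast path. */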
274 static inline int is_overlap_unlock(struct dlm_lkb *lkb)
276 return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
279 static inline int is_overlap_cancel(struct dlm_lkb *lkb)
281 return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
284 static inline int is_overlap(struct dlm_lkb *lkb)
286 return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
287 DLM_IFL_OVERLAP_CANCEL));
290 static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
292 if (is_master_copy(lkb))
297 DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
299 /* if the operation was a cancel, then return -DLM_ECANCEL; if a
300 timeout caused the cancel, then return -ETIMEDOUT */
301 if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
302 lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
306 if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
307 lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
311 dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
314 static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
317 is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
320 static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
322 if (is_master_copy(lkb)) {
323 send_bast(r, lkb, rqmode);
325 dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
330 * Basic operations on rsb's and lkb's
333 /* This is only called to add a reference when the code already holds
334 a valid reference to the rsb, so there's no need for locking. */
336 static inline void hold_rsb(struct dlm_rsb *r)
338 kref_get(&r->res_ref);
341 void dlm_hold_rsb(struct dlm_rsb *r)
346 /* When all references to the rsb are gone it's transferred to
347 the tossed list for later disposal. */
349 static void put_rsb(struct dlm_rsb *r)
351 struct dlm_ls *ls = r->res_ls;
352 uint32_t bucket = r->res_bucket;
354 spin_lock(&ls->ls_rsbtbl[bucket].lock);
355 kref_put(&r->res_ref, toss_rsb);
356 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
359 void dlm_put_rsb(struct dlm_rsb *r)
364 static int pre_rsb_struct(struct dlm_ls *ls)
366 struct dlm_rsb *r1, *r2;
369 spin_lock(&ls->ls_new_rsb_spin);
370 if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
371 spin_unlock(&ls->ls_new_rsb_spin);
374 spin_unlock(&ls->ls_new_rsb_spin);
376 r1 = dlm_allocate_rsb(ls);
377 r2 = dlm_allocate_rsb(ls);
379 spin_lock(&ls->ls_new_rsb_spin);
381 list_add(&r1->res_hashchain, &ls->ls_new_rsb);
382 ls->ls_new_rsb_count++;
385 list_add(&r2->res_hashchain, &ls->ls_new_rsb);
386 ls->ls_new_rsb_count++;
388 count = ls->ls_new_rsb_count;
389 spin_unlock(&ls->ls_new_rsb_spin);
396 /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
397 unlock any spinlocks, go back and call pre_rsb_struct again.
398 Otherwise, take an rsb off the list and return it. */
400 static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
401 struct dlm_rsb **r_ret)
406 spin_lock(&ls->ls_new_rsb_spin);
407 if (list_empty(&ls->ls_new_rsb)) {
408 count = ls->ls_new_rsb_count;
409 spin_unlock(&ls->ls_new_rsb_spin);
410 log_debug(ls, "find_rsb retry %d %d %s",
411 count, dlm_config.ci_new_rsb_count, name);
415 r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
416 list_del(&r->res_hashchain);
417 /* Convert the empty list_head to a NULL rb_node for tree usage: */
418 memset(&r->res_hashnode, 0, sizeof(struct rb_node));
419 ls->ls_new_rsb_count--;
420 spin_unlock(&ls->ls_new_rsb_spin);
424 memcpy(r->res_name, name, len);
425 mutex_init(&r->res_mutex);
427 INIT_LIST_HEAD(&r->res_lookup);
428 INIT_LIST_HEAD(&r->res_grantqueue);
429 INIT_LIST_HEAD(&r->res_convertqueue);
430 INIT_LIST_HEAD(&r->res_waitqueue);
431 INIT_LIST_HEAD(&r->res_root_list);
432 INIT_LIST_HEAD(&r->res_recover_list);
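/*
 * Illustrative sketch, not part of the original file: the retry protocol
 * described above pre_rsb_struct(). The helper name is hypothetical and
 * the bucket spinlock that real callers hold around get_rsb_struct()
 * (see find_rsb_dir() below) is omitted for brevity.
 */
static int example_rsb_alloc_retry(struct dlm_ls *ls, char *name, int len,
				   struct dlm_rsb **r_ret)
{
	int error;
 retry:
	error = pre_rsb_struct(ls);	/* refill ls_new_rsb if low */
	if (error < 0)
		return error;
	error = get_rsb_struct(ls, name, len, r_ret);
	if (error == -EAGAIN)		/* list went empty; preallocate again */
		goto retry;
	return error;
}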
438 static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
440 char maxname[DLM_RESNAME_MAXLEN];
442 memset(maxname, 0, DLM_RESNAME_MAXLEN);
443 memcpy(maxname, name, nlen);
444 return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
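/* Note (added): the lookup name is zero-padded to DLM_RESNAME_MAXLEN so
   the memcmp covers the full fixed-size res_name field; this yields a
   total order over names of different lengths for the rb-tree. */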
447 int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
448 struct dlm_rsb **r_ret)
450 struct rb_node *node = tree->rb_node;
455 r = rb_entry(node, struct dlm_rsb, res_hashnode);
456 rc = rsb_cmp(r, name, len);
458 node = node->rb_left;
460 node = node->rb_right;
472 static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
474 struct rb_node **newn = &tree->rb_node;
475 struct rb_node *parent = NULL;
479 struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
483 rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
485 newn = &parent->rb_left;
487 newn = &parent->rb_right;
489 log_print("rsb_insert match");
496 rb_link_node(&rsb->res_hashnode, parent, newn);
497 rb_insert_color(&rsb->res_hashnode, tree);
502 * Find rsb in rsbtbl and potentially create/add one
504 * Delaying the release of rsb's has a similar benefit to applications keeping
505 * NL locks on an rsb, but without the guarantee that the cached master value
506 * will still be valid when the rsb is reused. Apps aren't always smart enough
507 * to keep NL locks on an rsb that they may lock again shortly; this can lead
508 * to excessive master lookups and removals if we don't delay the release.
510 * Searching for an rsb means looking through both the normal list and toss
511 * list. When found on the toss list the rsb is moved to the normal list with
512 * ref count of 1; when found on normal list the ref count is incremented.
514 * rsb's on the keep list are being used locally and refcounted.
515 * rsb's on the toss list are not being used locally, and are not refcounted.
517 * The toss list rsb's were either
518 * - previously used locally but not any more (were on keep list, then
519 * moved to toss list when last refcount dropped)
520 * - created and put on toss list as a directory record for a lookup
521 * (we are the dir node for the res, but are not using the res right now,
522 * but some other node is)
524 * The purpose of find_rsb() is to return a refcounted rsb for local use.
525 * So, if the given rsb is on the toss list, it is moved to the keep list
526 * before being returned.
528 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
529 * more refcounts exist, so the rsb is moved from the keep list to the
532 * rsb's on both keep and toss lists are used for doing a name to master
533 * lookups. rsb's that are in use locally (and being refcounted) are on
534 * the keep list, rsb's that are not in use locally (not refcounted) and
535 * only exist for name/master lookups are on the toss list.
537 * rsb's on the toss list whose dir_nodeid is not local can have stale
538 * name/master mappings. So, remote requests on such rsb's can potentially
539 * return with an error, which means the mapping is stale and needs to
540 * be updated with a new lookup. (The idea behind MASTER UNCERTAIN and
541 * first_lkid is to keep only a single outstanding request on an rsb
542 * while that rsb has a potentially stale master.)
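/*
 * Illustrative sketch, not part of the original file: the keep-then-toss
 * search order implied above. find_rsb_dir() and find_rsb_nodir() below
 * do this under ls_rsbtbl[b].lock; the helper here is hypothetical and
 * omits the locking and the revive/create steps.
 */
static int example_search_keep_then_toss(struct dlm_ls *ls, char *name,
					 int len, uint32_t b,
					 struct dlm_rsb **r_ret)
{
	int error;

	/* active rsb's are refcounted; the caller takes another ref */
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, r_ret);
	if (!error)
		return 0;
	/* inactive rsb's get moved back to the keep list when found */
	return dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, r_ret);
}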
545 static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
546 uint32_t hash, uint32_t b,
547 int dir_nodeid, int from_nodeid,
548 unsigned int flags, struct dlm_rsb **r_ret)
550 struct dlm_rsb *r = NULL;
551 int our_nodeid = dlm_our_nodeid();
558 if (flags & R_RECEIVE_REQUEST) {
559 if (from_nodeid == dir_nodeid)
563 } else if (flags & R_REQUEST) {
568 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
569 * from_nodeid has sent us a lock in dlm_recover_locks, believing
570 * we're the new master. Our local recovery may not have set
571 * res_master_nodeid to our_nodeid yet, so allow either. Don't
572 * create the rsb; dlm_recover_process_copy() will handle EBADR
575 * If someone sends us a request, we are the dir node, and we do
576 * not find the rsb anywhere, then recreate it. This happens if
577 * someone sends us a request after we have removed/freed an rsb
578 * from our toss list. (They sent a request instead of lookup
579 * because they are using an rsb from their toss list.)
582 if (from_local || from_dir ||
583 (from_other && (dir_nodeid == our_nodeid))) {
589 error = pre_rsb_struct(ls);
594 spin_lock(&ls->ls_rsbtbl[b].lock);
596 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
601 * rsb is active, so we can't check master_nodeid without lock_rsb.
604 kref_get(&r->res_ref);
610 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
615 * rsb found inactive (master_nodeid may be out of date unless
616 * we are the dir_nodeid or were the master). No other thread
617 * is using this rsb because it's on the toss list, so we can
618 * look at or update res_master_nodeid without lock_rsb.
621 if ((r->res_master_nodeid != our_nodeid) && from_other) {
622 /* our rsb was not master, and another node (not the dir node)
623 has sent us a request */
624 log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
625 from_nodeid, r->res_master_nodeid, dir_nodeid,
631 if ((r->res_master_nodeid != our_nodeid) && from_dir) {
632 /* don't think this should ever happen */
633 log_error(ls, "find_rsb toss from_dir %d master %d",
634 from_nodeid, r->res_master_nodeid);
636 /* fix it and go on */
637 r->res_master_nodeid = our_nodeid;
639 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
640 r->res_first_lkid = 0;
643 if (from_local && (r->res_master_nodeid != our_nodeid)) {
644 /* Because we have held no locks on this rsb,
645 res_master_nodeid could have become stale. */
646 rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
647 r->res_first_lkid = 0;
650 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
651 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
660 if (error == -EBADR && !create)
663 error = get_rsb_struct(ls, name, len, &r);
664 if (error == -EAGAIN) {
665 spin_unlock(&ls->ls_rsbtbl[b].lock);
673 r->res_dir_nodeid = dir_nodeid;
674 kref_init(&r->res_ref);
677 /* want to see how often this happens */
678 log_debug(ls, "find_rsb new from_dir %d recreate %s",
679 from_nodeid, r->res_name);
680 r->res_master_nodeid = our_nodeid;
685 if (from_other && (dir_nodeid != our_nodeid)) {
686 /* should never happen */
687 log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
688 from_nodeid, dir_nodeid, our_nodeid, r->res_name);
696 log_debug(ls, "find_rsb new from_other %d dir %d %s",
697 from_nodeid, dir_nodeid, r->res_name);
700 if (dir_nodeid == our_nodeid) {
701 /* When we are the dir nodeid, we can set the master
703 r->res_master_nodeid = our_nodeid;
706 /* set_master will send_lookup to dir_nodeid */
707 r->res_master_nodeid = 0;
712 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
714 spin_unlock(&ls->ls_rsbtbl[b].lock);
720 /* During recovery, other nodes can send us new MSTCPY locks (from
721 dlm_recover_locks) before we've made ourselves master (in
722 dlm_recover_masters). */
724 static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
725 uint32_t hash, uint32_t b,
726 int dir_nodeid, int from_nodeid,
727 unsigned int flags, struct dlm_rsb **r_ret)
729 struct dlm_rsb *r = NULL;
730 int our_nodeid = dlm_our_nodeid();
731 int recover = (flags & R_RECEIVE_RECOVER);
735 error = pre_rsb_struct(ls);
739 spin_lock(&ls->ls_rsbtbl[b].lock);
741 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
746 * rsb is active, so we can't check master_nodeid without lock_rsb.
749 kref_get(&r->res_ref);
754 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
759 * rsb found inactive. No other thread is using this rsb because
760 * it's on the toss list, so we can look at or update
761 * res_master_nodeid without lock_rsb.
764 if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
765 /* our rsb is not master, and another node has sent us a
766 request; this should never happen */
767 log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
768 from_nodeid, r->res_master_nodeid, dir_nodeid);
774 if (!recover && (r->res_master_nodeid != our_nodeid) &&
775 (dir_nodeid == our_nodeid)) {
776 /* our rsb is not master, and we are dir; may as well fix it;
777 this should never happen */
778 log_error(ls, "find_rsb toss our %d master %d dir %d",
779 our_nodeid, r->res_master_nodeid, dir_nodeid);
781 r->res_master_nodeid = our_nodeid;
785 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
786 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
795 error = get_rsb_struct(ls, name, len, &r);
796 if (error == -EAGAIN) {
797 spin_unlock(&ls->ls_rsbtbl[b].lock);
805 r->res_dir_nodeid = dir_nodeid;
806 r->res_master_nodeid = dir_nodeid;
807 r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
808 kref_init(&r->res_ref);
810 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
812 spin_unlock(&ls->ls_rsbtbl[b].lock);
818 static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
819 unsigned int flags, struct dlm_rsb **r_ret)
824 if (len > DLM_RESNAME_MAXLEN)
827 hash = jhash(name, len, 0);
828 b = hash & (ls->ls_rsbtbl_size - 1);
830 dir_nodeid = dlm_hash2nodeid(ls, hash);
832 if (dlm_no_directory(ls))
833 return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
834 from_nodeid, flags, r_ret);
836 return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
837 from_nodeid, flags, r_ret);
840 /* we have received a request and found that res_master_nodeid != our_nodeid,
841 so we need to return an error or make ourselves the master */
843 static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
846 if (dlm_no_directory(ls)) {
847 log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
848 from_nodeid, r->res_master_nodeid,
854 if (from_nodeid != r->res_dir_nodeid) {
855 /* our rsb is not master, and another node (not the dir node)
856 has sent us a request. this is much more common when our
857 master_nodeid is zero, so limit debug to non-zero. */
859 if (r->res_master_nodeid) {
860 log_debug(ls, "validate master from_other %d master %d "
861 "dir %d first %x %s", from_nodeid,
862 r->res_master_nodeid, r->res_dir_nodeid,
863 r->res_first_lkid, r->res_name);
867 /* our rsb is not master, but the dir nodeid has sent us a
868 request; this could happen with master 0 / res_nodeid -1 */
870 if (r->res_master_nodeid) {
871 log_error(ls, "validate master from_dir %d master %d "
873 from_nodeid, r->res_master_nodeid,
874 r->res_first_lkid, r->res_name);
877 r->res_master_nodeid = dlm_our_nodeid();
884 * We're the dir node for this res and another node wants to know the
885 * master nodeid. During normal operation (non recovery) this is only
886 * called from receive_lookup(); master lookups when the local node is
887 * the dir node are done by find_rsb().
889 * normal operation, we are the dir node for a resource
894 * . dlm_master_lookup flags 0
896 * recover directory, we are rebuilding dir for all resources
897 * . dlm_recover_directory
899 * remote node sends back the rsb names it is master of and we are dir of
900 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
901 * we either create new rsb setting remote node as master, or find existing
902 * rsb and set master to be the remote node.
904 * recover masters, we are finding the new master for resources
905 * . dlm_recover_masters
907 * . dlm_send_rcom_lookup
908 * . receive_rcom_lookup
909 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
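/*
 * Summary sketch (added, not in the original): how the lookup flags map
 * to the from_master/fix_master booleans used below:
 *
 *   flags 0                 normal lookup from receive_lookup()
 *   DLM_LU_RECOVER_DIR      directory rebuild; the sender is master
 *   DLM_LU_RECOVER_MASTER   master recovery; replace a removed master
 */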
912 int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
913 unsigned int flags, int *r_nodeid, int *result)
915 struct dlm_rsb *r = NULL;
917 int from_master = (flags & DLM_LU_RECOVER_DIR);
918 int fix_master = (flags & DLM_LU_RECOVER_MASTER);
919 int our_nodeid = dlm_our_nodeid();
920 int dir_nodeid, error, toss_list = 0;
922 if (len > DLM_RESNAME_MAXLEN)
925 if (from_nodeid == our_nodeid) {
926 log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
931 hash = jhash(name, len, 0);
932 b = hash & (ls->ls_rsbtbl_size - 1);
934 dir_nodeid = dlm_hash2nodeid(ls, hash);
935 if (dir_nodeid != our_nodeid) {
936 log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
937 from_nodeid, dir_nodeid, our_nodeid, hash,
944 error = pre_rsb_struct(ls);
948 spin_lock(&ls->ls_rsbtbl[b].lock);
949 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
951 /* because the rsb is active, we need to lock_rsb before
952 checking/changing res_master_nodeid */
955 spin_unlock(&ls->ls_rsbtbl[b].lock);
960 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
964 /* because the rsb is inactive (on toss list), it's not refcounted
965 and lock_rsb is not used, but is protected by the rsbtbl lock */
969 if (r->res_dir_nodeid != our_nodeid) {
970 /* should not happen, but may as well fix it and carry on */
971 log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
972 r->res_dir_nodeid, our_nodeid, r->res_name);
973 r->res_dir_nodeid = our_nodeid;
976 if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
977 /* Recovery uses this function to set a new master when
978 the previous master failed. Setting NEW_MASTER will
979 force dlm_recover_masters to call recover_master on this
980 rsb even though the res_nodeid is no longer removed. */
982 r->res_master_nodeid = from_nodeid;
983 r->res_nodeid = from_nodeid;
984 rsb_set_flag(r, RSB_NEW_MASTER);
987 /* I don't think we should ever find it on toss list. */
988 log_error(ls, "dlm_master_lookup fix_master on toss");
993 if (from_master && (r->res_master_nodeid != from_nodeid)) {
994 /* this will happen if from_nodeid became master during
995 a previous recovery cycle, and we aborted the previous
996 cycle before recovering this master value */
998 log_limit(ls, "dlm_master_lookup from_master %d "
999 "master_nodeid %d res_nodeid %d first %x %s",
1000 from_nodeid, r->res_master_nodeid, r->res_nodeid,
1001 r->res_first_lkid, r->res_name);
1003 if (r->res_master_nodeid == our_nodeid) {
1004 log_error(ls, "from_master %d our_master", from_nodeid);
1009 r->res_master_nodeid = from_nodeid;
1010 r->res_nodeid = from_nodeid;
1011 rsb_set_flag(r, RSB_NEW_MASTER);
1014 if (!r->res_master_nodeid) {
1015 /* this will happen if recovery happens while we're looking
1016 up the master for this rsb */
1018 log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
1019 from_nodeid, r->res_first_lkid, r->res_name);
1020 r->res_master_nodeid = from_nodeid;
1021 r->res_nodeid = from_nodeid;
1024 if (!from_master && !fix_master &&
1025 (r->res_master_nodeid == from_nodeid)) {
1026 /* this can happen when the master sends remove, the dir node
1027 finds the rsb on the keep list and ignores the remove,
1028 and the former master sends a lookup */
1030 log_limit(ls, "dlm_master_lookup from master %d flags %x "
1031 "first %x %s", from_nodeid, flags,
1032 r->res_first_lkid, r->res_name);
1036 *r_nodeid = r->res_master_nodeid;
1038 *result = DLM_LU_MATCH;
1041 r->res_toss_time = jiffies;
1042 /* the rsb was inactive (on toss list) */
1043 spin_unlock(&ls->ls_rsbtbl[b].lock);
1045 /* the rsb was active */
1052 error = get_rsb_struct(ls, name, len, &r);
1053 if (error == -EAGAIN) {
1054 spin_unlock(&ls->ls_rsbtbl[b].lock);
1062 r->res_dir_nodeid = our_nodeid;
1063 r->res_master_nodeid = from_nodeid;
1064 r->res_nodeid = from_nodeid;
1065 kref_init(&r->res_ref);
1066 r->res_toss_time = jiffies;
1068 error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
1070 /* should never happen */
1072 spin_unlock(&ls->ls_rsbtbl[b].lock);
1077 *result = DLM_LU_ADD;
1078 *r_nodeid = from_nodeid;
1081 spin_unlock(&ls->ls_rsbtbl[b].lock);
1085 static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
1091 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1092 spin_lock(&ls->ls_rsbtbl[i].lock);
1093 for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
1094 r = rb_entry(n, struct dlm_rsb, res_hashnode);
1095 if (r->res_hash == hash)
1098 spin_unlock(&ls->ls_rsbtbl[i].lock);
1102 void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
1104 struct dlm_rsb *r = NULL;
1108 hash = jhash(name, len, 0);
1109 b = hash & (ls->ls_rsbtbl_size - 1);
1111 spin_lock(&ls->ls_rsbtbl[b].lock);
1112 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
1116 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1122 spin_unlock(&ls->ls_rsbtbl[b].lock);
1125 static void toss_rsb(struct kref *kref)
1127 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1128 struct dlm_ls *ls = r->res_ls;
1130 DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
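/* (added) re-arm the refcount at 1: a toss-list rsb holds no user
   references, but find_rsb() may revive it, and shrink_bucket() drops
   this final ref through kill_rsb() when the rsb is really freed */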
1131 kref_init(&r->res_ref);
1132 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
1133 rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
1134 r->res_toss_time = jiffies;
1135 ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
1136 if (r->res_lvbptr) {
1137 dlm_free_lvb(r->res_lvbptr);
1138 r->res_lvbptr = NULL;
1142 /* See comment for unhold_lkb */
1144 static void unhold_rsb(struct dlm_rsb *r)
1147 rv = kref_put(&r->res_ref, toss_rsb);
1148 DLM_ASSERT(!rv, dlm_dump_rsb(r););
1151 static void kill_rsb(struct kref *kref)
1153 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1155 /* All work is done after the return from kref_put() so we
1156 can release the write_lock before the remove and free. */
1158 DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
1159 DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
1160 DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
1161 DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
1162 DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
1163 DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
1166 /* Attaching/detaching lkb's from rsb's is for rsb reference counting.
1167 The rsb must exist as long as any lkb's for it do. */
1169 static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1172 lkb->lkb_resource = r;
1175 static void detach_lkb(struct dlm_lkb *lkb)
1177 if (lkb->lkb_resource) {
1178 put_rsb(lkb->lkb_resource);
1179 lkb->lkb_resource = NULL;
1183 static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
1185 struct dlm_lkb *lkb;
1188 lkb = dlm_allocate_lkb(ls);
1192 lkb->lkb_nodeid = -1;
1193 lkb->lkb_grmode = DLM_LOCK_IV;
1194 kref_init(&lkb->lkb_ref);
1195 INIT_LIST_HEAD(&lkb->lkb_ownqueue);
1196 INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
1197 INIT_LIST_HEAD(&lkb->lkb_time_list);
1198 INIT_LIST_HEAD(&lkb->lkb_cb_list);
1199 mutex_init(&lkb->lkb_cb_mutex);
1200 INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
1202 idr_preload(GFP_NOFS);
1203 spin_lock(&ls->ls_lkbidr_spin);
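/* (added) idr_alloc() with start 1 and end 0 means "any id >= 1", so
   lock id 0 is never handed out and can be treated as invalid */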
1204 rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
1207 spin_unlock(&ls->ls_lkbidr_spin);
1211 log_error(ls, "create_lkb idr error %d", rv);
1220 static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
1222 struct dlm_lkb *lkb;
1224 spin_lock(&ls->ls_lkbidr_spin);
1225 lkb = idr_find(&ls->ls_lkbidr, lkid);
1227 kref_get(&lkb->lkb_ref);
1228 spin_unlock(&ls->ls_lkbidr_spin);
1231 return lkb ? 0 : -ENOENT;
1234 static void kill_lkb(struct kref *kref)
1236 struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
1238 /* All work is done after the return from kref_put() so we
1239 can release the write_lock before the detach_lkb */
1241 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1244 /* __put_lkb() is used when an lkb may not have an rsb attached to
1245 it so we need to provide the lockspace explicitly */
1247 static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
1249 uint32_t lkid = lkb->lkb_id;
1251 spin_lock(&ls->ls_lkbidr_spin);
1252 if (kref_put(&lkb->lkb_ref, kill_lkb)) {
1253 idr_remove(&ls->ls_lkbidr, lkid);
1254 spin_unlock(&ls->ls_lkbidr_spin);
1258 /* for local/process lkbs, lvbptr points to caller's lksb */
1259 if (lkb->lkb_lvbptr && is_master_copy(lkb))
1260 dlm_free_lvb(lkb->lkb_lvbptr);
1264 spin_unlock(&ls->ls_lkbidr_spin);
1269 int dlm_put_lkb(struct dlm_lkb *lkb)
1273 DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
1274 DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
1276 ls = lkb->lkb_resource->res_ls;
1277 return __put_lkb(ls, lkb);
1280 /* This is only called to add a reference when the code already holds
1281 a valid reference to the lkb, so there's no need for locking. */
1283 static inline void hold_lkb(struct dlm_lkb *lkb)
1285 kref_get(&lkb->lkb_ref);
1288 /* This is called when we need to remove a reference and are certain
1289 it's not the last ref. e.g. del_lkb is always called between a
1290 find_lkb/put_lkb and is always the inverse of a previous add_lkb.
1291 put_lkb would work fine, but would involve unnecessary locking */
1293 static inline void unhold_lkb(struct dlm_lkb *lkb)
1296 rv = kref_put(&lkb->lkb_ref, kill_lkb);
1297 DLM_ASSERT(!rv, dlm_print_lkb(lkb););
1300 static void lkb_add_ordered(struct list_head *new, struct list_head *head,
1303 struct dlm_lkb *lkb = NULL;
1305 list_for_each_entry(lkb, head, lkb_statequeue)
1306 if (lkb->lkb_rqmode < mode)
1309 __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
1312 /* add/remove lkb to rsb's grant/convert/wait queue */
1314 static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
1316 kref_get(&lkb->lkb_ref);
1318 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1320 lkb->lkb_timestamp = ktime_get();
1322 lkb->lkb_status = status;
1325 case DLM_LKSTS_WAITING:
1326 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1327 list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
1329 list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
1331 case DLM_LKSTS_GRANTED:
1332 /* convention says granted locks kept in order of grmode */
1333 lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
1336 case DLM_LKSTS_CONVERT:
1337 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1338 list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
1340 list_add_tail(&lkb->lkb_statequeue,
1341 &r->res_convertqueue);
1344 DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
1348 static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1350 lkb->lkb_status = 0;
1351 list_del(&lkb->lkb_statequeue);
1355 static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
1359 add_lkb(r, lkb, sts);
1363 static int msg_reply_type(int mstype)
1366 case DLM_MSG_REQUEST:
1367 return DLM_MSG_REQUEST_REPLY;
1368 case DLM_MSG_CONVERT:
1369 return DLM_MSG_CONVERT_REPLY;
1370 case DLM_MSG_UNLOCK:
1371 return DLM_MSG_UNLOCK_REPLY;
1372 case DLM_MSG_CANCEL:
1373 return DLM_MSG_CANCEL_REPLY;
1374 case DLM_MSG_LOOKUP:
1375 return DLM_MSG_LOOKUP_REPLY;
1380 static int nodeid_warned(int nodeid, int num_nodes, int *warned)
1384 for (i = 0; i < num_nodes; i++) {
1389 if (warned[i] == nodeid)
1395 void dlm_scan_waiters(struct dlm_ls *ls)
1397 struct dlm_lkb *lkb;
1399 s64 debug_maxus = 0;
1400 u32 debug_scanned = 0;
1401 u32 debug_expired = 0;
1405 if (!dlm_config.ci_waitwarn_us)
1408 mutex_lock(&ls->ls_waiters_mutex);
1410 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1411 if (!lkb->lkb_wait_time)
1416 us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
1418 if (us < dlm_config.ci_waitwarn_us)
1421 lkb->lkb_wait_time = 0;
1424 if (us > debug_maxus)
1428 num_nodes = ls->ls_num_nodes;
1429 warned = kcalloc(num_nodes, sizeof(int), GFP_KERNEL);
1433 if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
1436 log_error(ls, "waitwarn %x %lld %d us check connection to "
1437 "node %d", lkb->lkb_id, (long long)us,
1438 dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
1440 mutex_unlock(&ls->ls_waiters_mutex);
1444 log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
1445 debug_scanned, debug_expired,
1446 dlm_config.ci_waitwarn_us, (long long)debug_maxus);
1449 /* add/remove lkb from global waiters list of lkb's waiting for
1450 a reply from a remote node */
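/*
 * (added) These pair with the message flow in the header comment:
 * add_to_waiters() runs before send_xxxx() ships a request, and
 * _remove_from_waiters() runs when receive_xxxx_reply() arrives (or the
 * op is aborted); the OVERLAP flags track an unlock/cancel issued while
 * the original operation is still in flight.
 */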
1452 static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
1454 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1457 mutex_lock(&ls->ls_waiters_mutex);
1459 if (is_overlap_unlock(lkb) ||
1460 (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
1465 if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
1467 case DLM_MSG_UNLOCK:
1468 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
1470 case DLM_MSG_CANCEL:
1471 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
1477 lkb->lkb_wait_count++;
1480 log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
1481 lkb->lkb_id, lkb->lkb_wait_type, mstype,
1482 lkb->lkb_wait_count, lkb->lkb_flags);
1486 DLM_ASSERT(!lkb->lkb_wait_count,
1488 printk("wait_count %d\n", lkb->lkb_wait_count););
1490 lkb->lkb_wait_count++;
1491 lkb->lkb_wait_type = mstype;
1492 lkb->lkb_wait_time = ktime_get();
1493 lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
1495 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
1498 log_error(ls, "addwait error %x %d flags %x %d %d %s",
1499 lkb->lkb_id, error, lkb->lkb_flags, mstype,
1500 lkb->lkb_wait_type, lkb->lkb_resource->res_name);
1501 mutex_unlock(&ls->ls_waiters_mutex);
1505 /* We clear the RESEND flag because we might be taking an lkb off the waiters
1506 list as part of process_requestqueue (e.g. a lookup that has an optimized
1507 request reply on the requestqueue) between dlm_recover_waiters_pre() which
1508 set RESEND and dlm_recover_waiters_post() */
1510 static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
1511 struct dlm_message *ms)
1513 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1514 int overlap_done = 0;
1516 if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
1517 log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
1518 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
1523 if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
1524 log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
1525 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1530 /* Cancel state was preemptively cleared by a successful convert,
1531 see next comment, nothing to do. */
1533 if ((mstype == DLM_MSG_CANCEL_REPLY) &&
1534 (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
1535 log_debug(ls, "remwait %x cancel_reply wait_type %d",
1536 lkb->lkb_id, lkb->lkb_wait_type);
1540 /* Remove for the convert reply, and preemptively remove for the
1541 cancel reply. A convert has been granted while there's still
1542 an outstanding cancel on it (the cancel is moot and the result
1543 in the cancel reply should be 0). We preempt the cancel reply
1544 because the app gets the convert result and then can follow up
1545 with another op, like convert. This subsequent op would see the
1546 lingering state of the cancel and fail with -EBUSY. */
1548 if ((mstype == DLM_MSG_CONVERT_REPLY) &&
1549 (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
1550 is_overlap_cancel(lkb) && ms && !ms->m_result) {
1551 log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
1553 lkb->lkb_wait_type = 0;
1554 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1555 lkb->lkb_wait_count--;
1560 /* N.B. type of reply may not always correspond to type of original
1561 msg due to lookup->request optimization, verify others? */
1563 if (lkb->lkb_wait_type) {
1564 lkb->lkb_wait_type = 0;
1568 log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
1569 lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
1570 mstype, lkb->lkb_flags);
1574 /* the force-unlock/cancel has completed and we haven't received a reply
1575 to the op that was in progress prior to the unlock/cancel; we
1576 give up on any reply to the earlier op. FIXME: not sure when/how
1577 this would happen */
1579 if (overlap_done && lkb->lkb_wait_type) {
1580 log_error(ls, "remwait error %x reply %d wait_type %d overlap",
1581 lkb->lkb_id, mstype, lkb->lkb_wait_type);
1582 lkb->lkb_wait_count--;
1584 lkb->lkb_wait_type = 0;
1587 DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
1589 lkb->lkb_flags &= ~DLM_IFL_RESEND;
1590 lkb->lkb_wait_count--;
1591 if (!lkb->lkb_wait_count)
1592 list_del_init(&lkb->lkb_wait_reply);
1597 static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
1599 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1602 mutex_lock(&ls->ls_waiters_mutex);
1603 error = _remove_from_waiters(lkb, mstype, NULL);
1604 mutex_unlock(&ls->ls_waiters_mutex);
1608 /* Handles situations where we might be processing a "fake" or "stub" reply in
1609 which we can't try to take waiters_mutex again. */
1611 static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
1613 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1616 if (ms->m_flags != DLM_IFL_STUB_MS)
1617 mutex_lock(&ls->ls_waiters_mutex);
1618 error = _remove_from_waiters(lkb, ms->m_type, ms);
1619 if (ms->m_flags != DLM_IFL_STUB_MS)
1620 mutex_unlock(&ls->ls_waiters_mutex);
1624 /* If there's an rsb for the same resource being removed, ensure
1625 that the remove message is sent before the new lookup message.
1626 It should be rare to need a delay here, but if not, then it may
1627 be worthwhile to add a proper wait mechanism rather than a delay. */
1629 static void wait_pending_remove(struct dlm_rsb *r)
1631 struct dlm_ls *ls = r->res_ls;
1633 spin_lock(&ls->ls_remove_spin);
1634 if (ls->ls_remove_len &&
1635 !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
1636 log_debug(ls, "delay lookup for remove dir %d %s",
1637 r->res_dir_nodeid, r->res_name);
1638 spin_unlock(&ls->ls_remove_spin);
1642 spin_unlock(&ls->ls_remove_spin);
1646 * ls_remove_spin protects ls_remove_name and ls_remove_len which are
1647 * read by other threads in wait_pending_remove. ls_remove_names
1648 * and ls_remove_lens are only used by the scan thread, so they do
1649 * not need protection.
1652 static void shrink_bucket(struct dlm_ls *ls, int b)
1654 struct rb_node *n, *next;
1657 int our_nodeid = dlm_our_nodeid();
1658 int remote_count = 0;
1659 int need_shrink = 0;
1662 memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
1664 spin_lock(&ls->ls_rsbtbl[b].lock);
1666 if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
1667 spin_unlock(&ls->ls_rsbtbl[b].lock);
1671 for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
1673 r = rb_entry(n, struct dlm_rsb, res_hashnode);
1675 /* If we're the directory record for this rsb, and
1676 we're not the master of it, then we need to wait
1677 for the master node to send us a dir remove for it
1678 before removing the dir record. */
1680 if (!dlm_no_directory(ls) &&
1681 (r->res_master_nodeid != our_nodeid) &&
1682 (dlm_dir_nodeid(r) == our_nodeid)) {
1688 if (!time_after_eq(jiffies, r->res_toss_time +
1689 dlm_config.ci_toss_secs * HZ)) {
1693 if (!dlm_no_directory(ls) &&
1694 (r->res_master_nodeid == our_nodeid) &&
1695 (dlm_dir_nodeid(r) != our_nodeid)) {
1697 /* We're the master of this rsb but we're not
1698 the directory record, so we need to tell the
1699 dir node to remove the dir record. */
1701 ls->ls_remove_lens[remote_count] = r->res_length;
1702 memcpy(ls->ls_remove_names[remote_count], r->res_name,
1703 DLM_RESNAME_MAXLEN);
1706 if (remote_count >= DLM_REMOVE_NAMES_MAX)
1711 if (!kref_put(&r->res_ref, kill_rsb)) {
1712 log_error(ls, "tossed rsb in use %s", r->res_name);
1716 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1721 ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
1723 ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
1724 spin_unlock(&ls->ls_rsbtbl[b].lock);
1727 * While searching for rsb's to free, we found some that require
1728 * remote removal. We leave them in place and find them again here
1729 * so there is a very small gap between removing them from the toss
1730 * list and sending the removal. Keeping this gap small is
1731 * important to keep us (the master node) from being out of sync
1732 * with the remote dir node for very long.
1734 * From the time the rsb is removed from toss until just after
1735 * send_remove, the rsb name is saved in ls_remove_name. A new
1736 * lookup checks this to ensure that a new lookup message for the
1737 * same resource name is not sent just before the remove message.
1740 for (i = 0; i < remote_count; i++) {
1741 name = ls->ls_remove_names[i];
1742 len = ls->ls_remove_lens[i];
1744 spin_lock(&ls->ls_rsbtbl[b].lock);
1745 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1747 spin_unlock(&ls->ls_rsbtbl[b].lock);
1748 log_debug(ls, "remove_name not toss %s", name);
1752 if (r->res_master_nodeid != our_nodeid) {
1753 spin_unlock(&ls->ls_rsbtbl[b].lock);
1754 log_debug(ls, "remove_name master %d dir %d our %d %s",
1755 r->res_master_nodeid, r->res_dir_nodeid,
1760 if (r->res_dir_nodeid == our_nodeid) {
1761 /* should never happen */
1762 spin_unlock(&ls->ls_rsbtbl[b].lock);
1763 log_error(ls, "remove_name dir %d master %d our %d %s",
1764 r->res_dir_nodeid, r->res_master_nodeid,
1769 if (!time_after_eq(jiffies, r->res_toss_time +
1770 dlm_config.ci_toss_secs * HZ)) {
1771 spin_unlock(&ls->ls_rsbtbl[b].lock);
1772 log_debug(ls, "remove_name toss_time %lu now %lu %s",
1773 r->res_toss_time, jiffies, name);
1777 if (!kref_put(&r->res_ref, kill_rsb)) {
1778 spin_unlock(&ls->ls_rsbtbl[b].lock);
1779 log_error(ls, "remove_name in use %s", name);
1783 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1785 /* block lookup of same name until we've sent remove */
1786 spin_lock(&ls->ls_remove_spin);
1787 ls->ls_remove_len = len;
1788 memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
1789 spin_unlock(&ls->ls_remove_spin);
1790 spin_unlock(&ls->ls_rsbtbl[b].lock);
1794 /* allow lookup of name again */
1795 spin_lock(&ls->ls_remove_spin);
1796 ls->ls_remove_len = 0;
1797 memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
1798 spin_unlock(&ls->ls_remove_spin);
1804 void dlm_scan_rsbs(struct dlm_ls *ls)
1808 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1809 shrink_bucket(ls, i);
1810 if (dlm_locking_stopped(ls))
1816 static void add_timeout(struct dlm_lkb *lkb)
1818 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1820 if (is_master_copy(lkb))
1823 if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1824 !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1825 lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1828 if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1833 DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1834 mutex_lock(&ls->ls_timeout_mutex);
1836 list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1837 mutex_unlock(&ls->ls_timeout_mutex);
1840 static void del_timeout(struct dlm_lkb *lkb)
1842 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1844 mutex_lock(&ls->ls_timeout_mutex);
1845 if (!list_empty(&lkb->lkb_time_list)) {
1846 list_del_init(&lkb->lkb_time_list);
1849 mutex_unlock(&ls->ls_timeout_mutex);
1852 /* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1853 lkb_lksb_timeout without lock_rsb? Note: we can't lock timeout_mutex
1854 and then lock rsb because of lock ordering in add_timeout. We may need
1855 to specify some special timeout-related bits in the lkb that are just to
1856 be accessed under the timeout_mutex. */
1858 void dlm_scan_timeout(struct dlm_ls *ls)
1861 struct dlm_lkb *lkb;
1862 int do_cancel, do_warn;
1866 if (dlm_locking_stopped(ls))
1871 mutex_lock(&ls->ls_timeout_mutex);
1872 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
1874 wait_us = ktime_to_us(ktime_sub(ktime_get(),
1875 lkb->lkb_timestamp));
1877 if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
1878 wait_us >= (lkb->lkb_timeout_cs * 10000))
1881 if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1882 wait_us >= dlm_config.ci_timewarn_cs * 10000)
1885 if (!do_cancel && !do_warn)
1890 mutex_unlock(&ls->ls_timeout_mutex);
1892 if (!do_cancel && !do_warn)
1895 r = lkb->lkb_resource;
1900 /* clear flag so we only warn once */
1901 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1902 if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1904 dlm_timeout_warn(lkb);
1908 log_debug(ls, "timeout cancel %x node %d %s",
1909 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1910 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1911 lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1913 _cancel_lock(r, lkb);
1922 /* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1923 dlm_recoverd before checking/setting ls_recover_begin. */
1925 void dlm_adjust_timeouts(struct dlm_ls *ls)
1927 struct dlm_lkb *lkb;
1928 u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
1930 ls->ls_recover_begin = 0;
1931 mutex_lock(&ls->ls_timeout_mutex);
1932 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1933 lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
1934 mutex_unlock(&ls->ls_timeout_mutex);
1936 if (!dlm_config.ci_waitwarn_us)
1939 mutex_lock(&ls->ls_waiters_mutex);
1940 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1941 if (ktime_to_us(lkb->lkb_wait_time))
1942 lkb->lkb_wait_time = ktime_get();
1944 mutex_unlock(&ls->ls_waiters_mutex);
1947 /* lkb is master or local copy */
1949 static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1951 int b, len = r->res_ls->ls_lvblen;
1953 /* b=1 lvb returned to caller
1954 b=0 lvb written to rsb or invalidated
1955 b=-1 do nothing */
1957 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1960 if (!lkb->lkb_lvbptr)
1963 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1969 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1970 lkb->lkb_lvbseq = r->res_lvbseq;
1972 } else if (b == 0) {
1973 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1974 rsb_set_flag(r, RSB_VALNOTVALID);
1978 if (!lkb->lkb_lvbptr)
1981 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1985 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1990 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1992 lkb->lkb_lvbseq = r->res_lvbseq;
1993 rsb_clear_flag(r, RSB_VALNOTVALID);
1996 if (rsb_flag(r, RSB_VALNOTVALID))
1997 lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
2000 static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2002 if (lkb->lkb_grmode < DLM_LOCK_PW)
2005 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
2006 rsb_set_flag(r, RSB_VALNOTVALID);
2010 if (!lkb->lkb_lvbptr)
2013 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2017 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
2022 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2024 rsb_clear_flag(r, RSB_VALNOTVALID);
2027 /* lkb is process copy (pc) */
2029 static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2030 struct dlm_message *ms)
2034 if (!lkb->lkb_lvbptr)
2037 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2040 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
2042 int len = receive_extralen(ms);
2043 if (len > r->res_ls->ls_lvblen)
2044 len = r->res_ls->ls_lvblen;
2045 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2046 lkb->lkb_lvbseq = ms->m_lvbseq;
2050 /* Manipulate lkb's on rsb's convert/granted/waiting queues
2051 remove_lock -- used for unlock, removes lkb from granted
2052 revert_lock -- used for cancel, moves lkb from convert to granted
2053 grant_lock -- used for request and convert, adds lkb to granted or
2054 moves lkb from convert or waiting to granted
2056 Each of these is used for master or local copy lkb's. There is
2057 also a _pc() variation used to make the corresponding change on
2058 a process copy (pc) lkb. */
2060 static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2063 lkb->lkb_grmode = DLM_LOCK_IV;
2064 /* this unhold undoes the original ref from create_lkb()
2065 so this leads to the lkb being freed */
2069 static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2071 set_lvb_unlock(r, lkb);
2072 _remove_lock(r, lkb);
2075 static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2077 _remove_lock(r, lkb);
2080 /* returns: 0 did nothing
2081 1 moved lock to granted
2084 static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2088 lkb->lkb_rqmode = DLM_LOCK_IV;
2090 switch (lkb->lkb_status) {
2091 case DLM_LKSTS_GRANTED:
2093 case DLM_LKSTS_CONVERT:
2094 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2097 case DLM_LKSTS_WAITING:
2099 lkb->lkb_grmode = DLM_LOCK_IV;
2100 /* this unhold undoes the original ref from create_lkb()
2101 so this leads to the lkb being freed */
2106 log_print("invalid status for revert %d", lkb->lkb_status);
2111 static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2113 return revert_lock(r, lkb);
2116 static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2118 if (lkb->lkb_grmode != lkb->lkb_rqmode) {
2119 lkb->lkb_grmode = lkb->lkb_rqmode;
2120 if (lkb->lkb_status)
2121 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2123 add_lkb(r, lkb, DLM_LKSTS_GRANTED);
2126 lkb->lkb_rqmode = DLM_LOCK_IV;
2127 lkb->lkb_highbast = 0;
2130 static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2132 set_lvb_lock(r, lkb);
2133 _grant_lock(r, lkb);
2136 static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2137 struct dlm_message *ms)
2139 set_lvb_lock_pc(r, lkb, ms);
2140 _grant_lock(r, lkb);
2143 /* called by grant_pending_locks() which means an async grant message must
2144 be sent to the requesting node in addition to granting the lock if the
2145 lkb belongs to a remote node. */
2147 static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
2150 if (is_master_copy(lkb))
2153 queue_cast(r, lkb, 0);
2156 /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
2157 change the granted/requested modes. We're munging things accordingly in
2159 CONVDEADLK: our grmode may have been forced down to NL to resolve a
2161 ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
2162 compatible with other granted locks */
2164 static void munge_demoted(struct dlm_lkb *lkb)
2166 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
2167 log_print("munge_demoted %x invalid modes gr %d rq %d",
2168 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
2172 lkb->lkb_grmode = DLM_LOCK_NL;
2175 static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
2177 if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
2178 ms->m_type != DLM_MSG_GRANT) {
2179 log_print("munge_altmode %x invalid reply type %d",
2180 lkb->lkb_id, ms->m_type);
2184 if (lkb->lkb_exflags & DLM_LKF_ALTPR)
2185 lkb->lkb_rqmode = DLM_LOCK_PR;
2186 else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
2187 lkb->lkb_rqmode = DLM_LOCK_CW;
2189 log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
2194 static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
2196 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
2198 if (lkb->lkb_id == first->lkb_id)
2204 /* Check if the given lkb conflicts with another lkb on the queue. */
2206 static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
2208 struct dlm_lkb *this;
2210 list_for_each_entry(this, head, lkb_statequeue) {
2213 if (!modes_compat(this, lkb))
2220 * "A conversion deadlock arises with a pair of lock requests in the converting
2221 * queue for one resource. The granted mode of each lock blocks the requested
2222 * mode of the other lock."
2224 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2225 * convert queue from being granted, then deadlk/demote lkb.
2228 * Granted Queue: empty
2229 * Convert Queue: NL->EX (first lock)
2230 * PR->EX (second lock)
2232 * The first lock can't be granted because of the granted mode of the second
2233 * lock and the second lock can't be granted because it's not first in the
2234 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2235 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2236 * flag set and return DEMOTED in the lksb flags.
2238 * Originally, this function detected conv-deadlk in a more limited scope:
2239 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2240 * - if lkb1 was the first entry in the queue (not just earlier), and was
2241 * blocked by the granted mode of lkb2, and there was nothing on the
2242 * granted queue preventing lkb1 from being granted immediately, i.e.
2243 * lkb2 was the only thing preventing lkb1 from being granted.
2245 * That second condition meant we'd only say there was conv-deadlk if
2246 * resolving it (by demotion) would lead to the first lock on the convert
2247 * queue being granted right away. It allowed conversion deadlocks to exist
2248 * between locks on the convert queue while they couldn't be granted anyway.
2250 * Now, we detect and take action on conversion deadlocks immediately when
2251 * they're created, even if they may not be immediately consequential. If
2252 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2253 * mode that would prevent lkb1's conversion from being granted, we do a
2254 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2255 * I think this means that the lkb_is_ahead condition below should always
2256 * be zero, i.e. there will never be conv-deadlk between two locks that are
2257 * both already on the convert queue.
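/*
 * Illustrative sketch, not part of the original file: the example above
 * in matrix terms. A granted PR mode blocks a requested EX mode, which
 * is what deadlocks the two queued conversions. Helper is hypothetical.
 */
static inline int example_pr_blocks_ex(void)
{
	return !__dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1]; /* 1 */
}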
2260 static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2262 struct dlm_lkb *lkb1;
2263 int lkb_is_ahead = 0;
2265 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2271 if (!lkb_is_ahead) {
2272 if (!modes_compat(lkb2, lkb1))
2275 if (!modes_compat(lkb2, lkb1) &&
2276 !modes_compat(lkb1, lkb2))
2284 * Return 1 if the lock can be granted, 0 otherwise.
2285 * Also detect and resolve conversion deadlocks.
2287 * lkb is the lock to be granted
2289 * now is 1 if the function is being called in the context of the
2290 * immediate request, it is 0 if called later, after the lock has been queued.
2293 * recover is 1 if dlm_recover_grant() is trying to grant conversions
2296 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2299 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2302 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2305 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2306	 * a new request for an NL mode lock being blocked.
2308 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2309 * request, then it would be granted. In essence, the use of this flag
2310	 * tells the Lock Manager to expedite this request by not considering
2311 * what may be in the CONVERTING or WAITING queues... As of this
2312 * writing, the EXPEDITE flag can be used only with new requests for NL
2313 * mode locks. This flag is not valid for conversion requests.
2315 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
2316 * conversion or used with a non-NL requested mode. We also know an
2317 * EXPEDITE request is always granted immediately, so now must always
2318 * be 1. The full condition to grant an expedite request: (now &&
2319 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2320 * therefore be shortened to just checking the flag.
2323 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2327 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2328 * added to the remaining conditions.
2331 if (queue_conflict(&r->res_grantqueue, lkb))
2335 * 6-3: By default, a conversion request is immediately granted if the
2336 * requested mode is compatible with the modes of all other granted
2340 if (queue_conflict(&r->res_convertqueue, lkb))
2344 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2345 * locks for a recovered rsb, on which lkb's have been rebuilt.
2346 * The lkb's may have been rebuilt on the queues in a different
2347 * order than they were in on the previous master. So, granting
2348 * queued conversions in order after recovery doesn't make sense
2349 * since the order hasn't been preserved anyway. The new order
2350 * could also have created a new "in place" conversion deadlock.
2351 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2352 * After recovery, there would be no granted locks, and possibly
2353 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
2354 * recovery, grant conversions without considering order.
2357 if (conv && recover)
2361 * 6-5: But the default algorithm for deciding whether to grant or
2362 * queue conversion requests does not by itself guarantee that such
2363 * requests are serviced on a "first come first serve" basis. This, in
2364	 * turn, can lead to a phenomenon known as "indefinite postponement".
2366 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2367 * the system service employed to request a lock conversion. This flag
2368 * forces certain conversion requests to be queued, even if they are
2369 * compatible with the granted modes of other locks on the same
2370 * resource. Thus, the use of this flag results in conversion requests
2371	 * being ordered on a "first come first serve" basis.
2373 * DCT: This condition is all about new conversions being able to occur
2374 * "in place" while the lock remains on the granted queue (assuming
2375 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
2376 * doesn't _have_ to go onto the convert queue where it's processed in
2377 * order. The "now" variable is necessary to distinguish converts
2378 * being received and processed for the first time now, because once a
2379 * convert is moved to the conversion queue the condition below applies
2380 * requiring fifo granting.
2383 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2387 * Even if the convert is compat with all granted locks,
2388 * QUECVT forces it behind other locks on the convert queue.
2391 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2392 if (list_empty(&r->res_convertqueue))
2399 * The NOORDER flag is set to avoid the standard vms rules on grant
2403 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2407 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2408 * granted until all other conversion requests ahead of it are granted
2412 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2416 * 6-4: By default, a new request is immediately granted only if all
2417 * three of the following conditions are satisfied when the request is
2419 * - The queue of ungranted conversion requests for the resource is
2421 * - The queue of ungranted new requests for the resource is empty.
2422 * - The mode of the new request is compatible with the most
2423 * restrictive mode of all granted locks on the resource.
2426 if (now && !conv && list_empty(&r->res_convertqueue) &&
2427 list_empty(&r->res_waitqueue))
2431 * 6-4: Once a lock request is in the queue of ungranted new requests,
2432 * it cannot be granted until the queue of ungranted conversion
2433 * requests is empty, all ungranted new requests ahead of it are
2434 * granted and/or canceled, and it is compatible with the granted mode
2435 * of the most restrictive lock granted on the resource.
2438 if (!now && !conv && list_empty(&r->res_convertqueue) &&
2439 first_in_list(lkb, &r->res_waitqueue))
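/* A condensed sketch of the decision order above (paraphrased from the
   checks in _can_be_granted(), not a substitute for them; "conflict"
   means queue_conflict() found an incompatible mode pair):

	EXPEDITE                                        -> grant
	conflict with grant queue                       -> block
	conflict with convert queue                     -> block
	conv && RECOVER_GRANT                           -> grant, order ignored
	now && conv && !QUECVT                          -> grant in place
	now && conv && QUECVT && convert queue empty    -> grant
	NOORDER                                         -> grant, no order rules
	!now && conv && first on convert queue          -> grant
	now && !conv && both queues empty               -> grant
	!now && !conv && convert queue empty
		      && first on wait queue            -> grant
	otherwise                                       -> block */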
2445 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2446 int recover, int *err)
2449 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2450 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2455 rv = _can_be_granted(r, lkb, now, recover);
2460 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2461 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2462 * cancels one of the locks.
2465 if (is_convert && can_be_queued(lkb) &&
2466 conversion_deadlock_detect(r, lkb)) {
2467 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2468 lkb->lkb_grmode = DLM_LOCK_NL;
2469 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2473 log_print("can_be_granted deadlock %x now %d",
2481 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2482 * to grant a request in a mode other than the normal rqmode. It's a
2483 * simple way to provide a big optimization to applications that can
2487 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2489 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2493 lkb->lkb_rqmode = alt;
2494 rv = _can_be_granted(r, lkb, now, 0);
2496 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2498 lkb->lkb_rqmode = rqmode;
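/* Illustrative sketch of the alt-mode retry above (the caller shown is
   hypothetical): an app that wants EX but can live with PR requests

	dlm_lock(ls, DLM_LOCK_EX, &lksb, DLM_LKF_ALTPR, ...);

   and if EX can't be granted, the request is retried here with rqmode
   PR; on success the lock is granted in PR and DLM_SBF_ALTMODE is
   returned in the lksb so the app knows which mode it actually holds. */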
2504 /* Returns the highest requested mode of all blocked conversions; sets
2505 cw if there's a blocked conversion to DLM_LOCK_CW. */
2507 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2508 unsigned int *count)
2510 struct dlm_lkb *lkb, *s;
2511 int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2512 int hi, demoted, quit, grant_restart, demote_restart;
2521 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2522 demoted = is_demoted(lkb);
2525 if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2526 grant_lock_pending(r, lkb);
2533 if (!demoted && is_demoted(lkb)) {
2534 log_print("WARN: pending demoted %x node %d %s",
2535 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2542			 * If the DLM_LKF_NODLCKWT flag is set and conversion
2543			 * deadlock is detected, we queue a blocking AST so the
2544			 * owner can demote (or cancel) the conversion.
2546 if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2547 if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2548 queue_bast(r, lkb, lkb->lkb_rqmode);
2549 lkb->lkb_highbast = lkb->lkb_rqmode;
2552 log_print("WARN: pending deadlock %x node %d %s",
2553 lkb->lkb_id, lkb->lkb_nodeid,
2560 hi = max_t(int, lkb->lkb_rqmode, hi);
2562 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2568 if (demote_restart && !quit) {
2573 return max_t(int, high, hi);
2576 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2577 unsigned int *count)
2579 struct dlm_lkb *lkb, *s;
2581 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2582 if (can_be_granted(r, lkb, 0, 0, NULL)) {
2583 grant_lock_pending(r, lkb);
2587 high = max_t(int, lkb->lkb_rqmode, high);
2588 if (lkb->lkb_rqmode == DLM_LOCK_CW)
2596 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2597 on either the convert or waiting queue.
2598 high is the largest rqmode of all locks blocked on the convert or
2601 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2603 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2604 if (gr->lkb_highbast < DLM_LOCK_EX)
2609 if (gr->lkb_highbast < high &&
2610 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2615 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2617 struct dlm_lkb *lkb, *s;
2618 int high = DLM_LOCK_IV;
2621 if (!is_master(r)) {
2622 log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2627 high = grant_pending_convert(r, high, &cw, count);
2628 high = grant_pending_wait(r, high, &cw, count);
2630 if (high == DLM_LOCK_IV)
2634 * If there are locks left on the wait/convert queue then send blocking
2635 * ASTs to granted locks based on the largest requested mode (high)
2639 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2640 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2641 if (cw && high == DLM_LOCK_PR &&
2642 lkb->lkb_grmode == DLM_LOCK_PR)
2643 queue_bast(r, lkb, DLM_LOCK_CW);
2645 queue_bast(r, lkb, high);
2646 lkb->lkb_highbast = high;
2651 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2653 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2654 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2655 if (gr->lkb_highbast < DLM_LOCK_EX)
2660 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2665 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2666 struct dlm_lkb *lkb)
2670 list_for_each_entry(gr, head, lkb_statequeue) {
2671 /* skip self when sending basts to convertqueue */
2674 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2675 queue_bast(r, gr, lkb->lkb_rqmode);
2676 gr->lkb_highbast = lkb->lkb_rqmode;
2681 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2683 send_bast_queue(r, &r->res_grantqueue, lkb);
2686 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2688 send_bast_queue(r, &r->res_grantqueue, lkb);
2689 send_bast_queue(r, &r->res_convertqueue, lkb);
2692 /* set_master(r, lkb) -- set the master nodeid of a resource
2694 The purpose of this function is to set the nodeid field in the given
2695 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
2696 known, it can just be copied to the lkb and the function will return
2697 0. If the rsb's nodeid is _not_ known, it needs to be looked up
2698 before it can be copied to the lkb.
2700 When the rsb nodeid is being looked up remotely, the initial lkb
2701 causing the lookup is kept on the ls_waiters list waiting for the
2702 lookup reply. Other lkb's waiting for the same rsb lookup are kept
2703 on the rsb's res_lookup list until the master is verified.
2706 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2707 1: the rsb master is not available and the lkb has been placed on
2711 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2713 int our_nodeid = dlm_our_nodeid();
2715 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2716 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2717 r->res_first_lkid = lkb->lkb_id;
2718 lkb->lkb_nodeid = r->res_nodeid;
2722 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2723 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2727 if (r->res_master_nodeid == our_nodeid) {
2728 lkb->lkb_nodeid = 0;
2732 if (r->res_master_nodeid) {
2733 lkb->lkb_nodeid = r->res_master_nodeid;
2737 if (dlm_dir_nodeid(r) == our_nodeid) {
2738 /* This is a somewhat unusual case; find_rsb will usually
2739 have set res_master_nodeid when dir nodeid is local, but
2740 there are cases where we become the dir node after we've
2741		   passed find_rsb and go through _request_lock again.
2742 confirm_master() or process_lookup_list() needs to be
2743 called after this. */
2744 log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2745 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2747 r->res_master_nodeid = our_nodeid;
2749 lkb->lkb_nodeid = 0;
2753 wait_pending_remove(r);
2755 r->res_first_lkid = lkb->lkb_id;
2756 send_lookup(r, lkb);
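/* Sketch of the remote-lookup flow started here (illustrative):

	first lkb:   set_master() -> send_lookup(); waits on ls_waiters
	later lkbs:  set_master() -> parked on r->res_lookup
	dir reply:   receive_lookup_reply() records the master, calls
		     _request_lock() for the first lkb, and
		     process_lookup_list() retries the parked ones */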
2760 static void process_lookup_list(struct dlm_rsb *r)
2762 struct dlm_lkb *lkb, *safe;
2764 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2765 list_del_init(&lkb->lkb_rsb_lookup);
2766 _request_lock(r, lkb);
2771 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2773 static void confirm_master(struct dlm_rsb *r, int error)
2775 struct dlm_lkb *lkb;
2777 if (!r->res_first_lkid)
2783 r->res_first_lkid = 0;
2784 process_lookup_list(r);
2790 /* the remote request failed and won't be retried (it was
2791 a NOQUEUE, or has been canceled/unlocked); make a waiting
2792 lkb the first_lkid */
2794 r->res_first_lkid = 0;
2796 if (!list_empty(&r->res_lookup)) {
2797 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2799 list_del_init(&lkb->lkb_rsb_lookup);
2800 r->res_first_lkid = lkb->lkb_id;
2801 _request_lock(r, lkb);
2806 log_error(r->res_ls, "confirm_master unknown error %d", error);
2810 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2811 int namelen, unsigned long timeout_cs,
2812 void (*ast) (void *astparam),
2814 void (*bast) (void *astparam, int mode),
2815 struct dlm_args *args)
2819 /* check for invalid arg usage */
2821 if (mode < 0 || mode > DLM_LOCK_EX)
2824 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2827 if (flags & DLM_LKF_CANCEL)
2830 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2833 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2836 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2839 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2842 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2845 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2848 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2854 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2857 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2860	/* these args will be copied to the lkb in validate_lock_args;
2861	   they cannot be copied now because when converting locks, fields in
2862 an active lkb cannot be modified before locking the rsb */
2864 args->flags = flags;
2866 args->astparam = astparam;
2867 args->bastfn = bast;
2868 args->timeout = timeout_cs;
2876 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2878 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2879 DLM_LKF_FORCEUNLOCK))
2882 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2885 args->flags = flags;
2886 args->astparam = astarg;
2890 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2891 struct dlm_args *args)
2895 if (args->flags & DLM_LKF_CONVERT) {
2896 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2899 if (lkb->lkb_wait_type)
2902 if (is_overlap(lkb))
2906 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2909 if (args->flags & DLM_LKF_QUECVT &&
2910 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2914 lkb->lkb_exflags = args->flags;
2915 lkb->lkb_sbflags = 0;
2916 lkb->lkb_astfn = args->astfn;
2917 lkb->lkb_astparam = args->astparam;
2918 lkb->lkb_bastfn = args->bastfn;
2919 lkb->lkb_rqmode = args->mode;
2920 lkb->lkb_lksb = args->lksb;
2921 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2922 lkb->lkb_ownpid = (int) current->pid;
2923 lkb->lkb_timeout_cs = args->timeout;
2927 log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2928 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2929 lkb->lkb_status, lkb->lkb_wait_type,
2930 lkb->lkb_resource->res_name);
2934 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2937 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2938 because there may be a lookup in progress and it's valid to do
2939	   cancel/forceunlock on it */
2941 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2943 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2946 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2947 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2952 /* an lkb may still exist even though the lock is EOL'ed due to a
2953 cancel, unlock or failed noqueue request; an app can't use these
2954	   locks; return the same error as if the lkid had not been found at all */
2956 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2957 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2962 /* an lkb may be waiting for an rsb lookup to complete where the
2963 lookup was initiated by another lock */
2965 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2966 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2967 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2968 list_del_init(&lkb->lkb_rsb_lookup);
2969 queue_cast(lkb->lkb_resource, lkb,
2970 args->flags & DLM_LKF_CANCEL ?
2971 -DLM_ECANCEL : -DLM_EUNLOCK);
2972 unhold_lkb(lkb); /* undoes create_lkb() */
2974 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2979 /* cancel not allowed with another cancel/unlock in progress */
2981 if (args->flags & DLM_LKF_CANCEL) {
2982 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2985 if (is_overlap(lkb))
2988 /* don't let scand try to do a cancel */
2991 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2992 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2997 /* there's nothing to cancel */
2998 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2999 !lkb->lkb_wait_type) {
3004 switch (lkb->lkb_wait_type) {
3005 case DLM_MSG_LOOKUP:
3006 case DLM_MSG_REQUEST:
3007 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3010 case DLM_MSG_UNLOCK:
3011 case DLM_MSG_CANCEL:
3014 /* add_to_waiters() will set OVERLAP_CANCEL */
3018 /* do we need to allow a force-unlock if there's a normal unlock
3019 already in progress? in what conditions could the normal unlock
3020 fail such that we'd want to send a force-unlock to be sure? */
3022 if (args->flags & DLM_LKF_FORCEUNLOCK) {
3023 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3026 if (is_overlap_unlock(lkb))
3029 /* don't let scand try to do a cancel */
3032 if (lkb->lkb_flags & DLM_IFL_RESEND) {
3033 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3038 switch (lkb->lkb_wait_type) {
3039 case DLM_MSG_LOOKUP:
3040 case DLM_MSG_REQUEST:
3041 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3044 case DLM_MSG_UNLOCK:
3047 /* add_to_waiters() will set OVERLAP_UNLOCK */
3051 /* normal unlock not allowed if there's any op in progress */
3053 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3057 /* an overlapping op shouldn't blow away exflags from other op */
3058 lkb->lkb_exflags |= args->flags;
3059 lkb->lkb_sbflags = 0;
3060 lkb->lkb_astparam = args->astparam;
3064 log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3065 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3066 args->flags, lkb->lkb_wait_type,
3067 lkb->lkb_resource->res_name);
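/* Rough summary of the overlap handling above (a sketch; see the code
   for the exact conditions):

	CANCEL with LOOKUP/REQUEST in flight   -> set OVERLAP_CANCEL, -EBUSY
	CANCEL with UNLOCK/CANCEL in flight    -> -EBUSY, nothing more to do
	FORCEUNLOCK with LOOKUP/REQUEST        -> set OVERLAP_UNLOCK, -EBUSY
	normal unlock with any op in flight    -> -EBUSY

   dlm_unlock() converts -EBUSY to 0 for CANCEL/FORCEUNLOCK. */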
3072 * Four stage 4 varieties:
3073 * do_request(), do_convert(), do_unlock(), do_cancel()
3074 * These are called on the master node for the given lock and
3075 * from the central locking logic.
3078 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3082 if (can_be_granted(r, lkb, 1, 0, NULL)) {
3084 queue_cast(r, lkb, 0);
3088 if (can_be_queued(lkb)) {
3089 error = -EINPROGRESS;
3090 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3096 queue_cast(r, lkb, -EAGAIN);
3101 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3106 if (force_blocking_asts(lkb))
3107 send_blocking_asts_all(r, lkb);
3110 send_blocking_asts(r, lkb);
3115 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3120 /* changing an existing lock may allow others to be granted */
3122 if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3124 queue_cast(r, lkb, 0);
3128 /* can_be_granted() detected that this lock would block in a conversion
3129 deadlock, so we leave it on the granted queue and return EDEADLK in
3130 the ast for the convert. */
3132 if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
3133 /* it's left on the granted queue */
3134 revert_lock(r, lkb);
3135 queue_cast(r, lkb, -EDEADLK);
3140 /* is_demoted() means the can_be_granted() above set the grmode
3141 to NL, and left us on the granted queue. This auto-demotion
3142 (due to CONVDEADLK) might mean other locks, and/or this lock, are
3143 now grantable. We have to try to grant other converting locks
3144 before we try again to grant this one. */
3146 if (is_demoted(lkb)) {
3147 grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3148 if (_can_be_granted(r, lkb, 1, 0)) {
3150 queue_cast(r, lkb, 0);
3153 /* else fall through and move to convert queue */
3156 if (can_be_queued(lkb)) {
3157 error = -EINPROGRESS;
3159 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3165 queue_cast(r, lkb, -EAGAIN);
3170 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3175 grant_pending_locks(r, NULL);
3176 /* grant_pending_locks also sends basts */
3179 if (force_blocking_asts(lkb))
3180 send_blocking_asts_all(r, lkb);
3183 send_blocking_asts(r, lkb);
3188 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3190 remove_lock(r, lkb);
3191 queue_cast(r, lkb, -DLM_EUNLOCK);
3192 return -DLM_EUNLOCK;
3195 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3198 grant_pending_locks(r, NULL);
3201 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3203 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3207 error = revert_lock(r, lkb);
3209 queue_cast(r, lkb, -DLM_ECANCEL);
3210 return -DLM_ECANCEL;
3215 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3219 grant_pending_locks(r, NULL);
3223 * Four stage 3 varieties:
3224 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3227 /* add a new lkb to a possibly new rsb, called by requesting process */
3229 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3233 /* set_master: sets lkb nodeid from r */
3235 error = set_master(r, lkb);
3244 /* receive_request() calls do_request() on remote node */
3245 error = send_request(r, lkb);
3247 error = do_request(r, lkb);
3248 /* for remote locks the request_reply is sent
3249 between do_request and do_request_effects */
3250 do_request_effects(r, lkb, error);
3256 /* change some property of an existing lkb, e.g. mode */
3258 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3263 /* receive_convert() calls do_convert() on remote node */
3264 error = send_convert(r, lkb);
3266 error = do_convert(r, lkb);
3267 /* for remote locks the convert_reply is sent
3268 between do_convert and do_convert_effects */
3269 do_convert_effects(r, lkb, error);
3275 /* remove an existing lkb from the granted queue */
3277 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3282 /* receive_unlock() calls do_unlock() on remote node */
3283 error = send_unlock(r, lkb);
3285 error = do_unlock(r, lkb);
3286 /* for remote locks the unlock_reply is sent
3287 between do_unlock and do_unlock_effects */
3288 do_unlock_effects(r, lkb, error);
3294 /* remove an existing lkb from the convert or wait queue */
3296 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3301 /* receive_cancel() calls do_cancel() on remote node */
3302 error = send_cancel(r, lkb);
3304 error = do_cancel(r, lkb);
3305 /* for remote locks the cancel_reply is sent
3306 between do_cancel and do_cancel_effects */
3307 do_cancel_effects(r, lkb, error);
3314 * Four stage 2 varieties:
3315 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3318 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3319 int len, struct dlm_args *args)
3324 error = validate_lock_args(ls, lkb, args);
3328 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3335 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3337 error = _request_lock(r, lkb);
3344 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3345 struct dlm_args *args)
3350 r = lkb->lkb_resource;
3355 error = validate_lock_args(ls, lkb, args);
3359 error = _convert_lock(r, lkb);
3366 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3367 struct dlm_args *args)
3372 r = lkb->lkb_resource;
3377 error = validate_unlock_args(lkb, args);
3381 error = _unlock_lock(r, lkb);
3388 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3389 struct dlm_args *args)
3394 r = lkb->lkb_resource;
3399 error = validate_unlock_args(lkb, args);
3403 error = _cancel_lock(r, lkb);
3411 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
3414 int dlm_lock(dlm_lockspace_t *lockspace,
3416 struct dlm_lksb *lksb,
3419 unsigned int namelen,
3420 uint32_t parent_lkid,
3421 void (*ast) (void *astarg),
3423 void (*bast) (void *astarg, int mode))
3426 struct dlm_lkb *lkb;
3427 struct dlm_args args;
3428 int error, convert = flags & DLM_LKF_CONVERT;
3430 ls = dlm_find_lockspace_local(lockspace);
3434 dlm_lock_recovery(ls);
3437 error = find_lkb(ls, lksb->sb_lkid, &lkb);
3439 error = create_lkb(ls, &lkb);
3444 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3445 astarg, bast, &args);
3450 error = convert_lock(ls, lkb, &args);
3452 error = request_lock(ls, lkb, name, namelen, &args);
3454 if (error == -EINPROGRESS)
3457 if (convert || error)
3459 if (error == -EAGAIN || error == -EDEADLK)
3462 dlm_unlock_recovery(ls);
3463 dlm_put_lockspace(ls);
3467 int dlm_unlock(dlm_lockspace_t *lockspace,
3470 struct dlm_lksb *lksb,
3474 struct dlm_lkb *lkb;
3475 struct dlm_args args;
3478 ls = dlm_find_lockspace_local(lockspace);
3482 dlm_lock_recovery(ls);
3484 error = find_lkb(ls, lkid, &lkb);
3488 error = set_unlock_args(flags, astarg, &args);
3492 if (flags & DLM_LKF_CANCEL)
3493 error = cancel_lock(ls, lkb, &args);
3495 error = unlock_lock(ls, lkb, &args);
3497 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3499 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3504 dlm_unlock_recovery(ls);
3505 dlm_put_lockspace(ls);
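/* Example usage (a minimal sketch, not from any in-tree caller; struct
   my_lock and my_ast are hypothetical). dlm_lock() returning 0 only
   means the request was accepted; the final result is delivered through
   the ast callback and lksb.sb_status:

	static void my_ast(void *astarg)
	{
		struct my_lock *ml = astarg;

		complete(&ml->done);
	}

	error = dlm_lock(ls, DLM_LOCK_EX, &ml->lksb, DLM_LKF_NOQUEUE,
			 name, namelen, 0, my_ast, ml, NULL);
	if (!error) {
		wait_for_completion(&ml->done);
		if (!ml->lksb.sb_status)
			... lock is held, sb_lkid identifies it ...
	}

	error = dlm_unlock(ls, ml->lksb.sb_lkid, 0, &ml->lksb, ml);
*/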
3510 * send/receive routines for remote operations and replies
3514 * send_request receive_request
3515 * send_convert receive_convert
3516 * send_unlock receive_unlock
3517 * send_cancel receive_cancel
3518 * send_grant receive_grant
3519 * send_bast receive_bast
3520 * send_lookup receive_lookup
3521 * send_remove receive_remove
3524 * receive_request_reply send_request_reply
3525 * receive_convert_reply send_convert_reply
3526 * receive_unlock_reply send_unlock_reply
3527 * receive_cancel_reply send_cancel_reply
3528 * receive_lookup_reply send_lookup_reply
3531 static int _create_message(struct dlm_ls *ls, int mb_len,
3532 int to_nodeid, int mstype,
3533 struct dlm_message **ms_ret,
3534 struct dlm_mhandle **mh_ret)
3536 struct dlm_message *ms;
3537 struct dlm_mhandle *mh;
3540 /* get_buffer gives us a message handle (mh) that we need to
3541 pass into lowcomms_commit and a message buffer (mb) that we
3542 write our data into */
3544 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3548 memset(mb, 0, mb_len);
3550 ms = (struct dlm_message *) mb;
3552 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3553 ms->m_header.h_lockspace = ls->ls_global_id;
3554 ms->m_header.h_nodeid = dlm_our_nodeid();
3555 ms->m_header.h_length = mb_len;
3556 ms->m_header.h_cmd = DLM_MSG;
3558 ms->m_type = mstype;
3565 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3566 int to_nodeid, int mstype,
3567 struct dlm_message **ms_ret,
3568 struct dlm_mhandle **mh_ret)
3570 int mb_len = sizeof(struct dlm_message);
3573 case DLM_MSG_REQUEST:
3574 case DLM_MSG_LOOKUP:
3575 case DLM_MSG_REMOVE:
3576 mb_len += r->res_length;
3578 case DLM_MSG_CONVERT:
3579 case DLM_MSG_UNLOCK:
3580 case DLM_MSG_REQUEST_REPLY:
3581 case DLM_MSG_CONVERT_REPLY:
3583 if (lkb && lkb->lkb_lvbptr)
3584 mb_len += r->res_ls->ls_lvblen;
3588 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3592 /* further lowcomms enhancements or alternate implementations may make
3593 the return value from this function useful at some point */
3595 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3597 dlm_message_out(ms);
3598 dlm_lowcomms_commit_buffer(mh);
3602 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3603 struct dlm_message *ms)
3605 ms->m_nodeid = lkb->lkb_nodeid;
3606 ms->m_pid = lkb->lkb_ownpid;
3607 ms->m_lkid = lkb->lkb_id;
3608 ms->m_remid = lkb->lkb_remid;
3609 ms->m_exflags = lkb->lkb_exflags;
3610 ms->m_sbflags = lkb->lkb_sbflags;
3611 ms->m_flags = lkb->lkb_flags;
3612 ms->m_lvbseq = lkb->lkb_lvbseq;
3613 ms->m_status = lkb->lkb_status;
3614 ms->m_grmode = lkb->lkb_grmode;
3615 ms->m_rqmode = lkb->lkb_rqmode;
3616 ms->m_hash = r->res_hash;
3618 /* m_result and m_bastmode are set from function args,
3619 not from lkb fields */
3621 if (lkb->lkb_bastfn)
3622 ms->m_asts |= DLM_CB_BAST;
3624 ms->m_asts |= DLM_CB_CAST;
3626 /* compare with switch in create_message; send_remove() doesn't
3629 switch (ms->m_type) {
3630 case DLM_MSG_REQUEST:
3631 case DLM_MSG_LOOKUP:
3632 memcpy(ms->m_extra, r->res_name, r->res_length);
3634 case DLM_MSG_CONVERT:
3635 case DLM_MSG_UNLOCK:
3636 case DLM_MSG_REQUEST_REPLY:
3637 case DLM_MSG_CONVERT_REPLY:
3639 if (!lkb->lkb_lvbptr)
3641 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3646 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3648 struct dlm_message *ms;
3649 struct dlm_mhandle *mh;
3650 int to_nodeid, error;
3652 to_nodeid = r->res_nodeid;
3654 error = add_to_waiters(lkb, mstype, to_nodeid);
3658 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3662 send_args(r, lkb, ms);
3664 error = send_message(mh, ms);
3670 remove_from_waiters(lkb, msg_reply_type(mstype));
3674 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3676 return send_common(r, lkb, DLM_MSG_REQUEST);
3679 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3683 error = send_common(r, lkb, DLM_MSG_CONVERT);
3685 /* down conversions go without a reply from the master */
3686 if (!error && down_conversion(lkb)) {
3687 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3688 r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3689 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3690 r->res_ls->ls_stub_ms.m_result = 0;
3691 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
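		/* a conversion to a lower mode is always grantable in
		   place, so the master's reply would carry no new
		   information; faking the stub reply above saves a
		   message round trip */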
3697 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3698 MASTER_UNCERTAIN to force the next request on the rsb to confirm
3699 that the master is still correct. */
3701 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3703 return send_common(r, lkb, DLM_MSG_UNLOCK);
3706 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3708 return send_common(r, lkb, DLM_MSG_CANCEL);
3711 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3713 struct dlm_message *ms;
3714 struct dlm_mhandle *mh;
3715 int to_nodeid, error;
3717 to_nodeid = lkb->lkb_nodeid;
3719 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3723 send_args(r, lkb, ms);
3727 error = send_message(mh, ms);
3732 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3734 struct dlm_message *ms;
3735 struct dlm_mhandle *mh;
3736 int to_nodeid, error;
3738 to_nodeid = lkb->lkb_nodeid;
3740 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3744 send_args(r, lkb, ms);
3746 ms->m_bastmode = mode;
3748 error = send_message(mh, ms);
3753 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3755 struct dlm_message *ms;
3756 struct dlm_mhandle *mh;
3757 int to_nodeid, error;
3759 to_nodeid = dlm_dir_nodeid(r);
3761 error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3765 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3769 send_args(r, lkb, ms);
3771 error = send_message(mh, ms);
3777 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3781 static int send_remove(struct dlm_rsb *r)
3783 struct dlm_message *ms;
3784 struct dlm_mhandle *mh;
3785 int to_nodeid, error;
3787 to_nodeid = dlm_dir_nodeid(r);
3789 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3793 memcpy(ms->m_extra, r->res_name, r->res_length);
3794 ms->m_hash = r->res_hash;
3796 error = send_message(mh, ms);
3801 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3804 struct dlm_message *ms;
3805 struct dlm_mhandle *mh;
3806 int to_nodeid, error;
3808 to_nodeid = lkb->lkb_nodeid;
3810 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3814 send_args(r, lkb, ms);
3818 error = send_message(mh, ms);
3823 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3825 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3828 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3830 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3833 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3835 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3838 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3840 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3843 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3844 int ret_nodeid, int rv)
3846 struct dlm_rsb *r = &ls->ls_stub_rsb;
3847 struct dlm_message *ms;
3848 struct dlm_mhandle *mh;
3849 int error, nodeid = ms_in->m_header.h_nodeid;
3851 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3855 ms->m_lkid = ms_in->m_lkid;
3857 ms->m_nodeid = ret_nodeid;
3859 error = send_message(mh, ms);
3864 /* which args we save from a received message depends heavily on the type
3865 of message, unlike the send side where we can safely send everything about
3866 the lkb for any type of message */
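/* The low 16 bits of lkb_flags (DLM_IFL_USER etc.) are shared with the
   remote copy of the lkb via m_flags; the high bits are node-local
   state, which is why the masks below preserve them on receive. */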
3868 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3870 lkb->lkb_exflags = ms->m_exflags;
3871 lkb->lkb_sbflags = ms->m_sbflags;
3872 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3873 (ms->m_flags & 0x0000FFFF);
3876 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3878 if (ms->m_flags == DLM_IFL_STUB_MS)
3881 lkb->lkb_sbflags = ms->m_sbflags;
3882 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3883 (ms->m_flags & 0x0000FFFF);
3886 static int receive_extralen(struct dlm_message *ms)
3888 return (ms->m_header.h_length - sizeof(struct dlm_message));
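/* e.g. for DLM_MSG_REQUEST the extra bytes hold the resource name, so
   the length above is the namelen; compare the mb_len arithmetic in
   create_message() */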
3891 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3892 struct dlm_message *ms)
3896 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3897 if (!lkb->lkb_lvbptr)
3898 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3899 if (!lkb->lkb_lvbptr)
3901 len = receive_extralen(ms);
3902 if (len > ls->ls_lvblen)
3903 len = ls->ls_lvblen;
3904 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3909 static void fake_bastfn(void *astparam, int mode)
3911 log_print("fake_bastfn should not be called");
3914 static void fake_astfn(void *astparam)
3916 log_print("fake_astfn should not be called");
3919 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3920 struct dlm_message *ms)
3922 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3923 lkb->lkb_ownpid = ms->m_pid;
3924 lkb->lkb_remid = ms->m_lkid;
3925 lkb->lkb_grmode = DLM_LOCK_IV;
3926 lkb->lkb_rqmode = ms->m_rqmode;
3928 lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3929 lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3931 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3932 /* lkb was just created so there won't be an lvb yet */
3933 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3934 if (!lkb->lkb_lvbptr)
3941 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3942 struct dlm_message *ms)
3944 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3947 if (receive_lvb(ls, lkb, ms))
3950 lkb->lkb_rqmode = ms->m_rqmode;
3951 lkb->lkb_lvbseq = ms->m_lvbseq;
3956 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3957 struct dlm_message *ms)
3959 if (receive_lvb(ls, lkb, ms))
3964 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3965 uses to send a reply and that the remote end uses to process the reply. */
3967 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3969 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3970 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3971 lkb->lkb_remid = ms->m_lkid;
3974 /* This is called after the rsb is locked so that we can safely inspect
3975 fields in the lkb. */
3977 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3979 int from = ms->m_header.h_nodeid;
3982	/* currently mixing of user/kernel locks is not supported */
3983 if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) {
3984 log_error(lkb->lkb_resource->res_ls,
3985 "got user dlm message for a kernel lock");
3990 switch (ms->m_type) {
3991 case DLM_MSG_CONVERT:
3992 case DLM_MSG_UNLOCK:
3993 case DLM_MSG_CANCEL:
3994 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3998 case DLM_MSG_CONVERT_REPLY:
3999 case DLM_MSG_UNLOCK_REPLY:
4000 case DLM_MSG_CANCEL_REPLY:
4003 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
4007 case DLM_MSG_REQUEST_REPLY:
4008 if (!is_process_copy(lkb))
4010 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4020 log_error(lkb->lkb_resource->res_ls,
4021 "ignore invalid message %d from %d %x %x %x %d",
4022 ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4023 lkb->lkb_flags, lkb->lkb_nodeid);
4027 static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4029 char name[DLM_RESNAME_MAXLEN + 1];
4030 struct dlm_message *ms;
4031 struct dlm_mhandle *mh;
4036 memset(name, 0, sizeof(name));
4037 memcpy(name, ms_name, len);
4039 hash = jhash(name, len, 0);
4040 b = hash & (ls->ls_rsbtbl_size - 1);
4042 dir_nodeid = dlm_hash2nodeid(ls, hash);
4044 log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4046 spin_lock(&ls->ls_rsbtbl[b].lock);
4047 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4049 spin_unlock(&ls->ls_rsbtbl[b].lock);
4050 log_error(ls, "repeat_remove on keep %s", name);
4054 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4056 spin_unlock(&ls->ls_rsbtbl[b].lock);
4057 log_error(ls, "repeat_remove on toss %s", name);
4061 /* use ls->remove_name2 to avoid conflict with shrink? */
4063 spin_lock(&ls->ls_remove_spin);
4064 ls->ls_remove_len = len;
4065 memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4066 spin_unlock(&ls->ls_remove_spin);
4067 spin_unlock(&ls->ls_rsbtbl[b].lock);
4069 rv = _create_message(ls, sizeof(struct dlm_message) + len,
4070 dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4074 memcpy(ms->m_extra, name, len);
4077 send_message(mh, ms);
4080 spin_lock(&ls->ls_remove_spin);
4081 ls->ls_remove_len = 0;
4082 memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4083 spin_unlock(&ls->ls_remove_spin);
4086 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4088 struct dlm_lkb *lkb;
4091 int error, namelen = 0;
4093 from_nodeid = ms->m_header.h_nodeid;
4095 error = create_lkb(ls, &lkb);
4099 receive_flags(lkb, ms);
4100 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4101 error = receive_request_args(ls, lkb, ms);
4107	/* The dir node is the authority on whether we are the master
4108	   for this rsb or not, so if a request arrives for an rsb we've
4109	   destroyed, we should recreate the rsb. This race happens when we
4110 send a remove message to the dir node at the same time that the dir
4111 node sends us a request for the rsb. */
4113 namelen = receive_extralen(ms);
4115 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4116 R_RECEIVE_REQUEST, &r);
4124 if (r->res_master_nodeid != dlm_our_nodeid()) {
4125 error = validate_master_nodeid(ls, r, from_nodeid);
4135 error = do_request(r, lkb);
4136 send_request_reply(r, lkb, error);
4137 do_request_effects(r, lkb, error);
4142 if (error == -EINPROGRESS)
4149 /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4150 and do this receive_request again from process_lookup_list once
4151	   we get the lookup reply. This would avoid many repeated
4152 ENOTBLK request failures when the lookup reply designating us
4153 as master is delayed. */
4155 /* We could repeatedly return -EBADR here if our send_remove() is
4156 delayed in being sent/arriving/being processed on the dir node.
4157	   Another node would repeatedly look up the master, and the dir
4158 node would continue returning our nodeid until our send_remove
4161 We send another remove message in case our previous send_remove
4162 was lost/ignored/missed somehow. */
4164 if (error != -ENOTBLK) {
4165 log_limit(ls, "receive_request %x from %d %d",
4166 ms->m_lkid, from_nodeid, error);
4169 if (namelen && error == -EBADR) {
4170 send_repeat_remove(ls, ms->m_extra, namelen);
4174 setup_stub_lkb(ls, ms);
4175 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4179 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4181 struct dlm_lkb *lkb;
4183 int error, reply = 1;
4185 error = find_lkb(ls, ms->m_remid, &lkb);
4189 if (lkb->lkb_remid != ms->m_lkid) {
4190 log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4191 "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4192 (unsigned long long)lkb->lkb_recover_seq,
4193 ms->m_header.h_nodeid, ms->m_lkid);
4199 r = lkb->lkb_resource;
4204 error = validate_message(lkb, ms);
4208 receive_flags(lkb, ms);
4210 error = receive_convert_args(ls, lkb, ms);
4212 send_convert_reply(r, lkb, error);
4216 reply = !down_conversion(lkb);
4218 error = do_convert(r, lkb);
4220 send_convert_reply(r, lkb, error);
4221 do_convert_effects(r, lkb, error);
4229 setup_stub_lkb(ls, ms);
4230 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4234 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4236 struct dlm_lkb *lkb;
4240 error = find_lkb(ls, ms->m_remid, &lkb);
4244 if (lkb->lkb_remid != ms->m_lkid) {
4245 log_error(ls, "receive_unlock %x remid %x remote %d %x",
4246 lkb->lkb_id, lkb->lkb_remid,
4247 ms->m_header.h_nodeid, ms->m_lkid);
4253 r = lkb->lkb_resource;
4258 error = validate_message(lkb, ms);
4262 receive_flags(lkb, ms);
4264 error = receive_unlock_args(ls, lkb, ms);
4266 send_unlock_reply(r, lkb, error);
4270 error = do_unlock(r, lkb);
4271 send_unlock_reply(r, lkb, error);
4272 do_unlock_effects(r, lkb, error);
4280 setup_stub_lkb(ls, ms);
4281 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4285 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4287 struct dlm_lkb *lkb;
4291 error = find_lkb(ls, ms->m_remid, &lkb);
4295 receive_flags(lkb, ms);
4297 r = lkb->lkb_resource;
4302 error = validate_message(lkb, ms);
4306 error = do_cancel(r, lkb);
4307 send_cancel_reply(r, lkb, error);
4308 do_cancel_effects(r, lkb, error);
4316 setup_stub_lkb(ls, ms);
4317 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4321 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4323 struct dlm_lkb *lkb;
4327 error = find_lkb(ls, ms->m_remid, &lkb);
4331 r = lkb->lkb_resource;
4336 error = validate_message(lkb, ms);
4340 receive_flags_reply(lkb, ms);
4341 if (is_altmode(lkb))
4342 munge_altmode(lkb, ms);
4343 grant_lock_pc(r, lkb, ms);
4344 queue_cast(r, lkb, 0);
4352 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4354 struct dlm_lkb *lkb;
4358 error = find_lkb(ls, ms->m_remid, &lkb);
4362 r = lkb->lkb_resource;
4367 error = validate_message(lkb, ms);
4371 queue_bast(r, lkb, ms->m_bastmode);
4372 lkb->lkb_highbast = ms->m_bastmode;
4380 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4382 int len, error, ret_nodeid, from_nodeid, our_nodeid;
4384 from_nodeid = ms->m_header.h_nodeid;
4385 our_nodeid = dlm_our_nodeid();
4387 len = receive_extralen(ms);
4389 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4392 /* Optimization: we're master so treat lookup as a request */
4393 if (!error && ret_nodeid == our_nodeid) {
4394 receive_request(ls, ms);
4397 send_lookup_reply(ls, ms, ret_nodeid, error);
4400 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4402 char name[DLM_RESNAME_MAXLEN+1];
4405 int rv, len, dir_nodeid, from_nodeid;
4407 from_nodeid = ms->m_header.h_nodeid;
4409 len = receive_extralen(ms);
4411 if (len > DLM_RESNAME_MAXLEN) {
4412 log_error(ls, "receive_remove from %d bad len %d",
4417 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4418 if (dir_nodeid != dlm_our_nodeid()) {
4419 log_error(ls, "receive_remove from %d bad nodeid %d",
4420 from_nodeid, dir_nodeid);
4424 /* Look for name on rsbtbl.toss, if it's there, kill it.
4425 If it's on rsbtbl.keep, it's being used, and we should ignore this
4426 message. This is an expected race between the dir node sending a
4427 request to the master node at the same time as the master node sends
4428 a remove to the dir node. The resolution to that race is for the
4429 dir node to ignore the remove message, and the master node to
4430 recreate the master rsb when it gets a request from the dir node for
4431 an rsb it doesn't have. */
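/* Timeline sketch of that race (illustrative):

	master M:  tosses its rsb and sends a remove to dir node D
	dir D:     concurrently directs a new request for the rsb to M
	dir D:     receive_remove() finds the rsb on its keep list
		   (still in use) and ignores the remove
	master M:  receive_request() recreates the rsb via find_rsb() */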
4433 memset(name, 0, sizeof(name));
4434 memcpy(name, ms->m_extra, len);
4436 hash = jhash(name, len, 0);
4437 b = hash & (ls->ls_rsbtbl_size - 1);
4439 spin_lock(&ls->ls_rsbtbl[b].lock);
4441 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4443 /* verify the rsb is on keep list per comment above */
4444 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4446 /* should not happen */
4447 log_error(ls, "receive_remove from %d not found %s",
4449 spin_unlock(&ls->ls_rsbtbl[b].lock);
4452 if (r->res_master_nodeid != from_nodeid) {
4453 /* should not happen */
4454 log_error(ls, "receive_remove keep from %d master %d",
4455 from_nodeid, r->res_master_nodeid);
4457 spin_unlock(&ls->ls_rsbtbl[b].lock);
4461 log_debug(ls, "receive_remove from %d master %d first %x %s",
4462 from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4464 spin_unlock(&ls->ls_rsbtbl[b].lock);
4468 if (r->res_master_nodeid != from_nodeid) {
4469 log_error(ls, "receive_remove toss from %d master %d",
4470 from_nodeid, r->res_master_nodeid);
4472 spin_unlock(&ls->ls_rsbtbl[b].lock);
4476 if (kref_put(&r->res_ref, kill_rsb)) {
4477 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4478 spin_unlock(&ls->ls_rsbtbl[b].lock);
4481 log_error(ls, "receive_remove from %d rsb ref error",
4484 spin_unlock(&ls->ls_rsbtbl[b].lock);
4488 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4490 do_purge(ls, ms->m_nodeid, ms->m_pid);
4493 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4495 struct dlm_lkb *lkb;
4497 int error, mstype, result;
4498 int from_nodeid = ms->m_header.h_nodeid;
4500 error = find_lkb(ls, ms->m_remid, &lkb);
4504 r = lkb->lkb_resource;
4508 error = validate_message(lkb, ms);
4512 mstype = lkb->lkb_wait_type;
4513 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4515 log_error(ls, "receive_request_reply %x remote %d %x result %d",
4516 lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4521 /* Optimization: the dir node was also the master, so it took our
4522 lookup as a request and sent request reply instead of lookup reply */
4523 if (mstype == DLM_MSG_LOOKUP) {
4524 r->res_master_nodeid = from_nodeid;
4525 r->res_nodeid = from_nodeid;
4526 lkb->lkb_nodeid = from_nodeid;
4529 /* this is the value returned from do_request() on the master */
4530 result = ms->m_result;
4534 /* request would block (be queued) on remote master */
4535 queue_cast(r, lkb, -EAGAIN);
4536 confirm_master(r, -EAGAIN);
4537 unhold_lkb(lkb); /* undoes create_lkb() */
4542 /* request was queued or granted on remote master */
4543 receive_flags_reply(lkb, ms);
4544 lkb->lkb_remid = ms->m_lkid;
4545 if (is_altmode(lkb))
4546 munge_altmode(lkb, ms);
4548 add_lkb(r, lkb, DLM_LKSTS_WAITING);
4551 grant_lock_pc(r, lkb, ms);
4552 queue_cast(r, lkb, 0);
4554 confirm_master(r, result);
4559		/* the remote find_rsb failed to find the rsb, or that node
		   wasn't master */
4560 log_limit(ls, "receive_request_reply %x from %d %d "
4561 "master %d dir %d first %x %s", lkb->lkb_id,
4562 from_nodeid, result, r->res_master_nodeid,
4563 r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4565 if (r->res_dir_nodeid != dlm_our_nodeid() &&
4566 r->res_master_nodeid != dlm_our_nodeid()) {
4567 /* cause _request_lock->set_master->send_lookup */
4568 r->res_master_nodeid = 0;
4570 lkb->lkb_nodeid = -1;
4573 if (is_overlap(lkb)) {
4574 /* we'll ignore error in cancel/unlock reply */
4575 queue_cast_overlap(r, lkb);
4576 confirm_master(r, result);
4577 unhold_lkb(lkb); /* undoes create_lkb() */
4579 _request_lock(r, lkb);
4581 if (r->res_master_nodeid == dlm_our_nodeid())
4582 confirm_master(r, 0);
4587 log_error(ls, "receive_request_reply %x error %d",
4588 lkb->lkb_id, result);
4591 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4592 log_debug(ls, "receive_request_reply %x result %d unlock",
4593 lkb->lkb_id, result);
4594 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4595 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4596 send_unlock(r, lkb);
4597 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4598 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4599 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4600 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4601 send_cancel(r, lkb);
4603 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4604 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4613 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4614 struct dlm_message *ms)
4616 /* this is the value returned from do_convert() on the master */
4617 switch (ms->m_result) {
4619 /* convert would block (be queued) on remote master */
4620 queue_cast(r, lkb, -EAGAIN);
4624 receive_flags_reply(lkb, ms);
4625 revert_lock_pc(r, lkb);
4626 queue_cast(r, lkb, -EDEADLK);
4630 /* convert was queued on remote master */
4631 receive_flags_reply(lkb, ms);
4632 if (is_demoted(lkb))
4635 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4640 /* convert was granted on remote master */
4641 receive_flags_reply(lkb, ms);
4642 if (is_demoted(lkb))
4644 grant_lock_pc(r, lkb, ms);
4645 queue_cast(r, lkb, 0);
4649 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4650 lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4657 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4659 struct dlm_rsb *r = lkb->lkb_resource;
4665 error = validate_message(lkb, ms);
4669 /* stub reply can happen with waiters_mutex held */
4670 error = remove_from_waiters_ms(lkb, ms);
4674 __receive_convert_reply(r, lkb, ms);
4680 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4682 struct dlm_lkb *lkb;
4685 error = find_lkb(ls, ms->m_remid, &lkb);
4689 _receive_convert_reply(lkb, ms);
4694 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4696 struct dlm_rsb *r = lkb->lkb_resource;
4702 error = validate_message(lkb, ms);
4706 /* stub reply can happen with waiters_mutex held */
4707 error = remove_from_waiters_ms(lkb, ms);
4711 /* this is the value returned from do_unlock() on the master */
4713 switch (ms->m_result) {
4715 receive_flags_reply(lkb, ms);
4716 remove_lock_pc(r, lkb);
4717 queue_cast(r, lkb, -DLM_EUNLOCK);
4722 log_error(r->res_ls, "receive_unlock_reply %x error %d",
4723 lkb->lkb_id, ms->m_result);
4730 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4732 struct dlm_lkb *lkb;
4735 error = find_lkb(ls, ms->m_remid, &lkb);
4739 _receive_unlock_reply(lkb, ms);
4744 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4746 struct dlm_rsb *r = lkb->lkb_resource;
4752 error = validate_message(lkb, ms);
4756 /* stub reply can happen with waiters_mutex held */
4757 error = remove_from_waiters_ms(lkb, ms);
4761 /* this is the value returned from do_cancel() on the master */
4763 switch (ms->m_result) {
4765 receive_flags_reply(lkb, ms);
4766 revert_lock_pc(r, lkb);
4767 queue_cast(r, lkb, -DLM_ECANCEL);
4772 log_error(r->res_ls, "receive_cancel_reply %x error %d",
4773 lkb->lkb_id, ms->m_result);
4780 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4782 struct dlm_lkb *lkb;
4785 error = find_lkb(ls, ms->m_remid, &lkb);
4789 _receive_cancel_reply(lkb, ms);
4794 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4796 struct dlm_lkb *lkb;
4798 int error, ret_nodeid;
4799 int do_lookup_list = 0;
4801 error = find_lkb(ls, ms->m_lkid, &lkb);
4803 log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4807 /* ms->m_result is the value returned by dlm_master_lookup on dir node
4808 FIXME: will a non-zero error ever be returned? */
4810 r = lkb->lkb_resource;
4814 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4818 ret_nodeid = ms->m_nodeid;
4820 /* We sometimes receive a request from the dir node for this
4821	   rsb before we've received the dir node's lookup_reply for it.
4822	   The request from the dir node implies we're the master, so we set
4823	   ourselves as master in receive_request_reply, and verify here that
4824 we are indeed the master. */
4826 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4827 /* This should never happen */
4828 log_error(ls, "receive_lookup_reply %x from %d ret %d "
4829 "master %d dir %d our %d first %x %s",
4830 lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4831 r->res_master_nodeid, r->res_dir_nodeid,
4832 dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4835 if (ret_nodeid == dlm_our_nodeid()) {
4836 r->res_master_nodeid = ret_nodeid;
4839 r->res_first_lkid = 0;
4840 } else if (ret_nodeid == -1) {
4841 /* the remote node doesn't believe it's the dir node */
4842 log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4843 lkb->lkb_id, ms->m_header.h_nodeid);
4844 r->res_master_nodeid = 0;
4846 lkb->lkb_nodeid = -1;
4848 /* set_master() will set lkb_nodeid from r */
4849 r->res_master_nodeid = ret_nodeid;
4850 r->res_nodeid = ret_nodeid;
4853 if (is_overlap(lkb)) {
4854 log_debug(ls, "receive_lookup_reply %x unlock %x",
4855 lkb->lkb_id, lkb->lkb_flags);
4856 queue_cast_overlap(r, lkb);
4857 unhold_lkb(lkb); /* undoes create_lkb() */
4861 _request_lock(r, lkb);
4865 process_lookup_list(r);
4872 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4875 int error = 0, noent = 0;
4877 if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4878 log_limit(ls, "receive %d from non-member %d %x %x %d",
4879 ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4880 ms->m_remid, ms->m_result);
4884 switch (ms->m_type) {
4886 /* messages sent to a master node */
4888 case DLM_MSG_REQUEST:
4889 error = receive_request(ls, ms);
4892 case DLM_MSG_CONVERT:
4893 error = receive_convert(ls, ms);
4896 case DLM_MSG_UNLOCK:
4897 error = receive_unlock(ls, ms);
4900 case DLM_MSG_CANCEL:
4902 error = receive_cancel(ls, ms);
4905 /* messages sent from a master node (replies to above) */
4907 case DLM_MSG_REQUEST_REPLY:
4908 error = receive_request_reply(ls, ms);
4911 case DLM_MSG_CONVERT_REPLY:
4912 error = receive_convert_reply(ls, ms);
4915 case DLM_MSG_UNLOCK_REPLY:
4916 error = receive_unlock_reply(ls, ms);
4919 case DLM_MSG_CANCEL_REPLY:
4920 error = receive_cancel_reply(ls, ms);
4923 /* messages sent from a master node (only two types of async msg) */
4927 error = receive_grant(ls, ms);
4932 error = receive_bast(ls, ms);
4935 /* messages sent to a dir node */
4937 case DLM_MSG_LOOKUP:
4938 receive_lookup(ls, ms);
4941 case DLM_MSG_REMOVE:
4942 receive_remove(ls, ms);
4945 /* messages sent from a dir node (remove has no reply) */
4947 case DLM_MSG_LOOKUP_REPLY:
4948 receive_lookup_reply(ls, ms);
4951 /* other messages */
4954 receive_purge(ls, ms);
4958 log_error(ls, "unknown message type %d", ms->m_type);
4962 * When checking for ENOENT, we're checking the result of
4963 * find_lkb(m_remid):
4965 * The lock id referenced in the message wasn't found. This may
4966 * happen in normal usage for the async messages and cancel, so
4967 * only use log_debug for them.
4969 * Some errors are expected and normal.
4972 if (error == -ENOENT && noent) {
4973 log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4974 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4975 ms->m_lkid, saved_seq);
4976 } else if (error == -ENOENT) {
4977 log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4978 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4979 ms->m_lkid, saved_seq);
4981 if (ms->m_type == DLM_MSG_CONVERT)
4982 dlm_dump_rsb_hash(ls, ms->m_hash);
4985 if (error == -EINVAL) {
4986 log_error(ls, "receive %d inval from %d lkid %x remid %x "
4988 ms->m_type, ms->m_header.h_nodeid,
4989 ms->m_lkid, ms->m_remid, saved_seq);
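/* Illustrative sketch (not part of the original file): the int-returning
   cases in the switch above map message types to handlers one-to-one, so
   they could in principle be table-driven as below.  The real code keeps
   the explicit switch because a few handlers return void and some cases
   also set the noent hint used by the ENOENT logging above. */

typedef int (*dlm_msg_fn_t)(struct dlm_ls *ls, struct dlm_message *ms);

static const dlm_msg_fn_t msg_fn_sketch[] = {
	[DLM_MSG_REQUEST]       = receive_request,
	[DLM_MSG_CONVERT]       = receive_convert,
	[DLM_MSG_UNLOCK]        = receive_unlock,
	[DLM_MSG_CANCEL]        = receive_cancel,
	[DLM_MSG_REQUEST_REPLY] = receive_request_reply,
	[DLM_MSG_CONVERT_REPLY] = receive_convert_reply,
	[DLM_MSG_UNLOCK_REPLY]  = receive_unlock_reply,
	[DLM_MSG_CANCEL_REPLY]  = receive_cancel_reply,
	[DLM_MSG_GRANT]         = receive_grant,
	[DLM_MSG_BAST]          = receive_bast,
};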
4993 /* If the lockspace is in recovery mode (locking stopped), then normal
4994 messages are saved on the requestqueue for processing after recovery is
4995 done. When not in recovery mode, we wait for dlm_recoverd to drain saved
4996 messages off the requestqueue before we process new ones. This occurs right
4997 after recovery completes when we transition from saving all messages on
4998 requestqueue, to processing all the saved messages, to processing new
4999 messages as they arrive. */
5001 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
5004 if (dlm_locking_stopped(ls)) {
5005 /* If we were a member of this lockspace, left, and rejoined,
5006 other nodes may still be sending us messages from the
5007 lockspace generation before we left. */
5008 if (!ls->ls_generation) {
5009 log_limit(ls, "receive %d from %d ignore old gen",
5010 ms->m_type, nodeid);
5014 dlm_add_requestqueue(ls, nodeid, ms);
5016 dlm_wait_requestqueue(ls);
5017 _receive_message(ls, ms, 0);
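/* Timing sketch for the three phases described above (illustrative):

	locking stopped:   m1 arrives -> dlm_add_requestqueue(m1)
	recovery finishes: dlm_recoverd drains m1 via
			   dlm_receive_message_saved()
	locking running:   m2 arrives -> dlm_wait_requestqueue() confirms
			   the queue is drained, then _receive_message(m2)

   so a new message is never processed ahead of one that was saved during
   recovery. */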
5021 /* This is called by dlm_recoverd to process messages that were saved on
5022 the requestqueue. */
5024 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
5027 _receive_message(ls, ms, saved_seq);
5030 /* This is called by the midcomms layer when something is received for
5031 the lockspace. It could be either a MSG (normal message sent as part of
5032 standard locking activity) or an RCOM (recovery message sent as part of
5033 lockspace recovery). */
5035 void dlm_receive_buffer(union dlm_packet *p, int nodeid)
5037 struct dlm_header *hd = &p->header;
5041 switch (hd->h_cmd) {
5043 dlm_message_in(&p->message);
5044 type = p->message.m_type;
5047 dlm_rcom_in(&p->rcom);
5048 type = p->rcom.rc_type;
5051 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
5055 if (hd->h_nodeid != nodeid) {
5056 log_print("invalid h_nodeid %d from %d lockspace %x",
5057 hd->h_nodeid, nodeid, hd->h_lockspace);
5061 ls = dlm_find_lockspace_global(hd->h_lockspace);
5063 if (dlm_config.ci_log_debug) {
5064 printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
5065 "%u from %d cmd %d type %d\n",
5066 hd->h_lockspace, nodeid, hd->h_cmd, type);
5069 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
5070 dlm_send_ls_not_ready(nodeid, &p->rcom);
5074 /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
5075 be inactive (in this ls) before transitioning to recovery mode */
5077 down_read(&ls->ls_recv_active);
5078 if (hd->h_cmd == DLM_MSG)
5079 dlm_receive_message(ls, &p->message, nodeid);
5081 dlm_receive_rcom(ls, &p->rcom, nodeid);
5082 up_read(&ls->ls_recv_active);
5084 dlm_put_lockspace(ls);
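/* Counterpart sketch (hypothetical; the real dlm_ls_stop() lives in
   lockspace.c and is not shown here): the stop path would take the same
   rwsem for write, roughly

	down_write(&ls->ls_recv_active);
	...switch the lockspace into recovery mode...
	up_write(&ls->ls_recv_active);

   so a receiver never straddles the transition, while concurrent
   receivers do not serialize against each other (they all hold the
   read side). */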
5087 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5088 struct dlm_message *ms_stub)
5090 if (middle_conversion(lkb)) {
5092 memset(ms_stub, 0, sizeof(struct dlm_message));
5093 ms_stub->m_flags = DLM_IFL_STUB_MS;
5094 ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
5095 ms_stub->m_result = -EINPROGRESS;
5096 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5097 _receive_convert_reply(lkb, ms_stub);
5099 /* Same special case as in receive_rcom_lock_args() */
5100 lkb->lkb_grmode = DLM_LOCK_IV;
5101 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5104 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5105 lkb->lkb_flags |= DLM_IFL_RESEND;
5108 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5109 conversions are async; there's no reply from the remote master */
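/* Outcome summary for the conversion cases above (illustrative):

	PR <-> CW ("middle"):  fake an -EINPROGRESS reply; grmode is set
			       to IV and RSB_RECOVER_CONVERT is set so the
			       real granted mode can be deduced once all
			       locks are rebuilt on the rsb
	rqmode >= grmode (up): flag DLM_IFL_RESEND; resent after recovery
	rqmode <  grmode (dn): cannot be waiting here, as noted above   */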
5112 /* A waiting lkb needs recovery if the master node has failed, or
5113 the master node is changing (only when no directory is used) */
5115 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5118 if (dlm_no_directory(ls))
5121 if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
5127 /* Recovery for locks that are waiting for replies from nodes that are now
5128 gone. We can just complete unlocks and cancels by faking a reply from the
5129 dead node. Requests and up-conversions we flag to be resent after
5130 recovery. Down-conversions can just be completed with a fake reply like
5131 unlocks. Conversions between PR and CW need special attention. */
5133 void dlm_recover_waiters_pre(struct dlm_ls *ls)
5135 struct dlm_lkb *lkb, *safe;
5136 struct dlm_message *ms_stub;
5137 int wait_type, stub_unlock_result, stub_cancel_result;
5140 ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
5144 mutex_lock(&ls->ls_waiters_mutex);
5146 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
5148 dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5150 /* exclude debug messages about unlocks because there can be so
5151 many and they aren't very interesting */
5153 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
5154 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5155 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5156 lkb->lkb_id,
5157 lkb->lkb_remid,
5158 lkb->lkb_wait_type,
5159 lkb->lkb_resource->res_nodeid,
5160 lkb->lkb_nodeid,
5161 lkb->lkb_wait_nodeid,
5162 dir_nodeid);
5165 /* all outstanding lookups, regardless of destination will be
5166 resent after recovery is done */
5168 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5169 lkb->lkb_flags |= DLM_IFL_RESEND;
5173 if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
5176 wait_type = lkb->lkb_wait_type;
5177 stub_unlock_result = -DLM_EUNLOCK;
5178 stub_cancel_result = -DLM_ECANCEL;
5180 /* Main reply may have been received leaving a zero wait_type,
5181 but a reply for the overlapping op may not have been
5182 received. In that case we need to fake the appropriate
5183 reply for the overlap op. */
5186 if (is_overlap_cancel(lkb)) {
5187 wait_type = DLM_MSG_CANCEL;
5188 if (lkb->lkb_grmode == DLM_LOCK_IV)
5189 stub_cancel_result = 0;
5191 if (is_overlap_unlock(lkb)) {
5192 wait_type = DLM_MSG_UNLOCK;
5193 if (lkb->lkb_grmode == DLM_LOCK_IV)
5194 stub_unlock_result = -ENOENT;
5197 log_debug(ls, "rwpre overlap %x %x %d %d %d",
5198 lkb->lkb_id, lkb->lkb_flags, wait_type,
5199 stub_cancel_result, stub_unlock_result);
5202 switch (wait_type) {
5204 case DLM_MSG_REQUEST:
5205 lkb->lkb_flags |= DLM_IFL_RESEND;
5208 case DLM_MSG_CONVERT:
5209 recover_convert_waiter(ls, lkb, ms_stub);
5212 case DLM_MSG_UNLOCK:
5214 memset(ms_stub, 0, sizeof(struct dlm_message));
5215 ms_stub->m_flags = DLM_IFL_STUB_MS;
5216 ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
5217 ms_stub->m_result = stub_unlock_result;
5218 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5219 _receive_unlock_reply(lkb, ms_stub);
5223 case DLM_MSG_CANCEL:
5225 memset(ms_stub, 0, sizeof(struct dlm_message));
5226 ms_stub->m_flags = DLM_IFL_STUB_MS;
5227 ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
5228 ms_stub->m_result = stub_cancel_result;
5229 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5230 _receive_cancel_reply(lkb, ms_stub);
5235 log_error(ls, "invalid lkb wait_type %d %d",
5236 lkb->lkb_wait_type, wait_type);
5240 mutex_unlock(&ls->ls_waiters_mutex);
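/* The stub-reply construction appears three times (convert, unlock and
   cancel waiters).  A possible consolidation -- hypothetical helper, not
   part of this file -- could look like: */

static void fake_stub_reply(struct dlm_lkb *lkb, struct dlm_message *ms_stub,
			    int type, int result)
{
	/* fake a reply from the (failed) remote node so the normal
	   _receive_xxxx_reply() path completes the waiting operation */
	memset(ms_stub, 0, sizeof(struct dlm_message));
	ms_stub->m_flags = DLM_IFL_STUB_MS;
	ms_stub->m_type = type;
	ms_stub->m_result = result;
	ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
}

/* e.g. the DLM_MSG_UNLOCK case above would reduce to:
	fake_stub_reply(lkb, ms_stub, DLM_MSG_UNLOCK_REPLY, stub_unlock_result);
	_receive_unlock_reply(lkb, ms_stub);				*/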
5244 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5246 struct dlm_lkb *lkb;
5249 mutex_lock(&ls->ls_waiters_mutex);
5250 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
5251 if (lkb->lkb_flags & DLM_IFL_RESEND) {
5257 mutex_unlock(&ls->ls_waiters_mutex);
5264 /* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
5265 master or dir-node for r. Processing the lkb may result in it being placed
5266 back on the waiters list. */
5268 /* We do this after normal locking has been enabled and any saved messages
5269 (in requestqueue) have been processed. We should be confident that at
5270 this point we won't get or process a reply to any of these waiting
5271 operations. But, new ops may be coming in on the rsbs/locks here from
5272 userspace or remotely. */
5274 /* there may have been an overlap unlock/cancel prior to recovery or after
5275 recovery. if before, the lkb may still have a positive wait_count; if after, the
5276 overlap flag would just have been set and nothing new sent. we can be
5277 confident here that any replies to either the initial op or overlap ops
5278 prior to recovery have been received. */
5280 int dlm_recover_waiters_post(struct dlm_ls *ls)
5282 struct dlm_lkb *lkb;
5284 int error = 0, mstype, err, oc, ou;
5287 if (dlm_locking_stopped(ls)) {
5288 log_debug(ls, "recover_waiters_post aborted");
5293 lkb = find_resend_waiter(ls);
5297 r = lkb->lkb_resource;
5301 mstype = lkb->lkb_wait_type;
5302 oc = is_overlap_cancel(lkb);
5303 ou = is_overlap_unlock(lkb);
5306 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5307 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5308 "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5309 r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5310 dlm_dir_nodeid(r), oc, ou);
5312 /* At this point we assume that we won't get a reply to any
5313 previous op or overlap op on this lock. First, do a big
5314 remove_from_waiters() for all previous ops. */
5316 lkb->lkb_flags &= ~DLM_IFL_RESEND;
5317 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5318 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5319 lkb->lkb_wait_type = 0;
5320 /* drop all wait_count references; we still
5321 * hold a reference for this iteration.
5323 while (lkb->lkb_wait_count) {
5324 lkb->lkb_wait_count--;
5327 mutex_lock(&ls->ls_waiters_mutex);
5328 list_del_init(&lkb->lkb_wait_reply);
5329 mutex_unlock(&ls->ls_waiters_mutex);
5332 /* do an unlock or cancel instead of resending */
5334 case DLM_MSG_LOOKUP:
5335 case DLM_MSG_REQUEST:
5336 queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5337 -DLM_ECANCEL);
5338 unhold_lkb(lkb); /* undoes create_lkb() */
5340 case DLM_MSG_CONVERT:
5342 queue_cast(r, lkb, -DLM_ECANCEL);
5344 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5345 _unlock_lock(r, lkb);
5353 case DLM_MSG_LOOKUP:
5354 case DLM_MSG_REQUEST:
5355 _request_lock(r, lkb);
5357 confirm_master(r, 0);
5359 case DLM_MSG_CONVERT:
5360 _convert_lock(r, lkb);
5368 log_error(ls, "waiter %x msg %d r_nodeid %d "
5369 "dir_nodeid %d overlap %d %d",
5370 lkb->lkb_id, mstype, r->res_nodeid,
5371 dlm_dir_nodeid(r), oc, ou);
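/* Decision summary for the two switches above (illustrative):

   overlap unlock/cancel pending (ou || oc):
	LOOKUP/REQUEST -> queue_cast(-DLM_EUNLOCK or -DLM_ECANCEL) and
			  drop the create_lkb() reference
	CONVERT        -> cancel overlap: queue_cast(-DLM_ECANCEL);
			  unlock overlap: forced _unlock_lock()
   no overlap:
	LOOKUP/REQUEST -> _request_lock() resends; confirm_master() where
			  we are the master
	CONVERT        -> _convert_lock() resends			*/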
5381 static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5382 struct list_head *list)
5384 struct dlm_lkb *lkb, *safe;
5386 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5387 if (!is_master_copy(lkb))
5390 /* don't purge lkbs we've added in recover_master_copy for
5391 the current recovery seq */
5393 if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5398 /* this put should free the lkb */
5399 if (!dlm_put_lkb(lkb))
5400 log_error(ls, "purged mstcpy lkb not released");
5404 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5406 struct dlm_ls *ls = r->res_ls;
5408 purge_mstcpy_list(ls, r, &r->res_grantqueue);
5409 purge_mstcpy_list(ls, r, &r->res_convertqueue);
5410 purge_mstcpy_list(ls, r, &r->res_waitqueue);
5413 static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5414 struct list_head *list,
5415 int nodeid_gone, unsigned int *count)
5417 struct dlm_lkb *lkb, *safe;
5419 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5420 if (!is_master_copy(lkb))
5423 if ((lkb->lkb_nodeid == nodeid_gone) ||
5424 dlm_is_removed(ls, lkb->lkb_nodeid)) {
5426 /* tell recover_lvb to invalidate the lvb
5427 because a node holding EX/PW failed */
5428 if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5429 (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5430 rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5435 /* this put should free the lkb */
5436 if (!dlm_put_lkb(lkb))
5437 log_error(ls, "purged dead lkb not released");
5439 rsb_set_flag(r, RSB_RECOVER_GRANT);
5446 /* Get rid of locks held by nodes that are gone. */
5448 void dlm_recover_purge(struct dlm_ls *ls)
5451 struct dlm_member *memb;
5452 int nodes_count = 0;
5453 int nodeid_gone = 0;
5454 unsigned int lkb_count = 0;
5456 /* cache one removed nodeid to optimize the common
5457 case of a single node removed */
5459 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5461 nodeid_gone = memb->nodeid;
5467 down_write(&ls->ls_root_sem);
5468 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5472 purge_dead_list(ls, r, &r->res_grantqueue,
5473 nodeid_gone, &lkb_count);
5474 purge_dead_list(ls, r, &r->res_convertqueue,
5475 nodeid_gone, &lkb_count);
5476 purge_dead_list(ls, r, &r->res_waitqueue,
5477 nodeid_gone, &lkb_count);
5483 up_write(&ls->ls_root_sem);
5486 log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5487 lkb_count, nodes_count);
5490 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5495 spin_lock(&ls->ls_rsbtbl[bucket].lock);
5496 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5497 r = rb_entry(n, struct dlm_rsb, res_hashnode);
5499 if (!rsb_flag(r, RSB_RECOVER_GRANT))
5501 if (!is_master(r)) {
5502 rsb_clear_flag(r, RSB_RECOVER_GRANT);
5506 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5509 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5514 * Attempt to grant locks on resources that we are the master of.
5515 * Locks may have become grantable during recovery because locks
5516 * from departed nodes have been purged (or not rebuilt), allowing
5517 * previously blocked locks to now be granted. The subset of rsb's
5518 * we are interested in are those with lkb's on either the convert or
5519 * waiting queues.
5521 * Simplest would be to go through each master rsb and check for non-empty
5522 * convert or waiting queues, and attempt to grant on those rsbs.
5523 * Checking the queues requires lock_rsb, though, for which we'd need
5524 * to release the rsbtbl lock. This would make iterating through all
5525 * rsb's very inefficient. So, we rely on earlier recovery routines
5526 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5527 * locks on. */
5530 void dlm_recover_grant(struct dlm_ls *ls)
5534 unsigned int count = 0;
5535 unsigned int rsb_count = 0;
5536 unsigned int lkb_count = 0;
5539 r = find_grant_rsb(ls, bucket);
5541 if (bucket == ls->ls_rsbtbl_size - 1)
5549 /* the RECOVER_GRANT flag is checked in the grant path */
5550 grant_pending_locks(r, &count);
5551 rsb_clear_flag(r, RSB_RECOVER_GRANT);
5553 confirm_master(r, 0);
5560 log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5561 lkb_count, rsb_count);
5564 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5567 struct dlm_lkb *lkb;
5569 list_for_each_entry(lkb, head, lkb_statequeue) {
5570 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5576 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5579 struct dlm_lkb *lkb;
5581 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5584 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5587 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5593 /* needs at least dlm_rcom + rcom_lock */
5594 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5595 struct dlm_rsb *r, struct dlm_rcom *rc)
5597 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5599 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
5600 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5601 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5602 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5603 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5604 lkb->lkb_flags |= DLM_IFL_MSTCPY;
5605 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5606 lkb->lkb_rqmode = rl->rl_rqmode;
5607 lkb->lkb_grmode = rl->rl_grmode;
5608 /* don't set lkb_status because add_lkb wants to set it itself */
5610 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5611 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5613 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5614 int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
5615 sizeof(struct rcom_lock);
5616 if (lvblen > ls->ls_lvblen)
5618 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5619 if (!lkb->lkb_lvbptr)
5621 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5624 /* Conversions between PR and CW (middle modes) need special handling.
5625 The real granted mode of these converting locks cannot be determined
5626 until all locks have been rebuilt on the rsb (recover_conversion) */
5628 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5629 middle_conversion(lkb)) {
5630 rl->rl_status = DLM_LKSTS_CONVERT;
5631 lkb->lkb_grmode = DLM_LOCK_IV;
5632 rsb_set_flag(r, RSB_RECOVER_CONVERT);
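/* Worked example for the VALBLK branch above (illustrative numbers): with
   ls->ls_lvblen == 64, an rcom carrying a full LVB arrives with
   rc_header.h_length == sizeof(struct dlm_rcom) + sizeof(struct rcom_lock)
   + 64, so lvblen computes to 64 and the memcpy stays within the buffer
   returned by dlm_allocate_lvb().  A larger lvblen indicates a corrupt or
   mismatched message and fails the length check. */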
5638 /* This lkb may have been recovered in a previous aborted recovery so we need
5639 to check if the rsb already has an lkb with the given remote nodeid/lkid.
5640 If so we just send back a standard reply. If not, we create a new lkb with
5641 the given values and send back our lkid. We send back our lkid by sending
5642 back the rcom_lock struct we got but with the remid field filled in. */
5644 /* needs at least dlm_rcom + rcom_lock */
5645 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5647 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5649 struct dlm_lkb *lkb;
5651 int from_nodeid = rc->rc_header.h_nodeid;
5654 if (rl->rl_parent_lkid) {
5655 error = -EOPNOTSUPP;
5659 remid = le32_to_cpu(rl->rl_lkid);
5661 /* In general we expect the rsb returned to be R_MASTER, but we don't
5662 have to require it. Recovery of masters on one node can overlap
5663 recovery of locks on another node, so one node can send us MSTCPY
5664 locks before we've made ourselves master of this rsb. We can still
5665 add new MSTCPY locks that we receive here without any harm; when
5666 we make ourselves master, dlm_recover_masters() won't touch the
5667 MSTCPY locks we've received early. */
5669 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5670 from_nodeid, R_RECEIVE_RECOVER, &r);
5676 if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5677 log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5678 from_nodeid, remid);
5683 lkb = search_remid(r, from_nodeid, remid);
5689 error = create_lkb(ls, &lkb);
5693 error = receive_rcom_lock_args(ls, lkb, r, rc);
5700 add_lkb(r, lkb, rl->rl_status);
5702 ls->ls_recover_locks_in++;
5704 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5705 rsb_set_flag(r, RSB_RECOVER_GRANT);
5708 /* this is the new value returned to the lock holder for
5709 saving in its process-copy lkb */
5710 rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5712 lkb->lkb_recover_seq = ls->ls_recover_seq;
5718 if (error && error != -EEXIST)
5719 log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5720 from_nodeid, remid, error);
5721 rl->rl_result = cpu_to_le32(error);
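/* Round-trip sketch: the lock holder sends rcom_lock with rl_lkid set to
   its own (process-copy) lock id; we find or create the matching MSTCPY
   lkb here and echo the struct back with rl_remid set to our lkb_id.  The
   holder then records that remid via dlm_recover_process_copy() below,
   re-linking its process copy to the new master copy. */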
5725 /* needs at least dlm_rcom + rcom_lock */
5726 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5728 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5730 struct dlm_lkb *lkb;
5731 uint32_t lkid, remid;
5734 lkid = le32_to_cpu(rl->rl_lkid);
5735 remid = le32_to_cpu(rl->rl_remid);
5736 result = le32_to_cpu(rl->rl_result);
5738 error = find_lkb(ls, lkid, &lkb);
5740 log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5741 lkid, rc->rc_header.h_nodeid, remid, result);
5745 r = lkb->lkb_resource;
5749 if (!is_process_copy(lkb)) {
5750 log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5751 lkid, rc->rc_header.h_nodeid, remid, result);
5761 /* There's a chance the new master received our lock before
5762 dlm_recover_master_reply(); this wouldn't happen if we did
5763 a barrier between recover_masters and recover_locks. */
5765 log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5766 lkid, rc->rc_header.h_nodeid, remid, result);
5768 dlm_send_rcom_lock(r, lkb);
5772 lkb->lkb_remid = remid;
5775 log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5776 lkid, rc->rc_header.h_nodeid, remid, result);
5779 /* an ack for dlm_recover_locks() which waits for replies from
5780 all the locks it sends to new masters */
5781 dlm_recovered_lock(r);
5790 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5791 int mode, uint32_t flags, void *name, unsigned int namelen,
5792 unsigned long timeout_cs)
5794 struct dlm_lkb *lkb;
5795 struct dlm_args args;
5798 dlm_lock_recovery(ls);
5800 error = create_lkb(ls, &lkb);
5806 if (flags & DLM_LKF_VALBLK) {
5807 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5808 if (!ua->lksb.sb_lvbptr) {
5815 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5816 fake_astfn, ua, fake_bastfn, &args);
5818 kfree(ua->lksb.sb_lvbptr);
5819 ua->lksb.sb_lvbptr = NULL;
5825 /* After ua is attached to lkb it will be freed by dlm_free_lkb().
5826 When DLM_IFL_USER is set, the dlm knows that this is a userspace
5827 lock and that lkb_astparam is the dlm_user_args structure. */
5828 lkb->lkb_flags |= DLM_IFL_USER;
5829 error = request_lock(ls, lkb, name, namelen, &args);
5845 /* add this new lkb to the per-process list of locks */
5846 spin_lock(&ua->proc->locks_spin);
5848 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5849 spin_unlock(&ua->proc->locks_spin);
5851 dlm_unlock_recovery(ls);
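/* Hypothetical caller sketch (the real caller is the userspace device
   interface, not shown in this file): a write() handler that has unpacked
   a lock request into 'ua', a resource name and a mode might simply do

	error = dlm_user_request(ls, ua, mode, flags, name, namelen, 0);

   on success the new lkb is already on ua->proc's locks list and the
   completion ast is delivered later through fake_astfn. */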
5855 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5856 int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5857 unsigned long timeout_cs)
5859 struct dlm_lkb *lkb;
5860 struct dlm_args args;
5861 struct dlm_user_args *ua;
5864 dlm_lock_recovery(ls);
5866 error = find_lkb(ls, lkid, &lkb);
5870 /* user can change the params on its lock when it converts it, or
5871 add an lvb that didn't exist before */
5875 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5876 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5877 if (!ua->lksb.sb_lvbptr) {
5882 if (lvb_in && ua->lksb.sb_lvbptr)
5883 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5885 ua->xid = ua_tmp->xid;
5886 ua->castparam = ua_tmp->castparam;
5887 ua->castaddr = ua_tmp->castaddr;
5888 ua->bastparam = ua_tmp->bastparam;
5889 ua->bastaddr = ua_tmp->bastaddr;
5890 ua->user_lksb = ua_tmp->user_lksb;
5892 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5893 fake_astfn, ua, fake_bastfn, &args);
5897 error = convert_lock(ls, lkb, &args);
5899 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5904 dlm_unlock_recovery(ls);
5910 * The caller asks for an orphan lock on a given resource with a given mode.
5911 * If a matching lock exists, it's moved to the owner's list of locks and
5912 * the lkid is returned.
5915 int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5916 int mode, uint32_t flags, void *name, unsigned int namelen,
5917 unsigned long timeout_cs, uint32_t *lkid)
5919 struct dlm_lkb *lkb;
5920 struct dlm_user_args *ua;
5921 int found_other_mode = 0;
5925 mutex_lock(&ls->ls_orphans_mutex);
5926 list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
5927 if (lkb->lkb_resource->res_length != namelen)
5929 if (memcmp(lkb->lkb_resource->res_name, name, namelen))
5931 if (lkb->lkb_grmode != mode) {
5932 found_other_mode = 1;
5937 list_del_init(&lkb->lkb_ownqueue);
5938 lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
5939 *lkid = lkb->lkb_id;
5942 mutex_unlock(&ls->ls_orphans_mutex);
5944 if (!found && found_other_mode) {
5954 lkb->lkb_exflags = flags;
5955 lkb->lkb_ownpid = (int) current->pid;
5959 ua->proc = ua_tmp->proc;
5960 ua->xid = ua_tmp->xid;
5961 ua->castparam = ua_tmp->castparam;
5962 ua->castaddr = ua_tmp->castaddr;
5963 ua->bastparam = ua_tmp->bastparam;
5964 ua->bastaddr = ua_tmp->bastaddr;
5965 ua->user_lksb = ua_tmp->user_lksb;
5968 * The lkb reference from the ls_orphans list was not
5969 * removed above, and is now considered the reference
5970 * for the proc locks list.
5973 spin_lock(&ua->proc->locks_spin);
5974 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5975 spin_unlock(&ua->proc->locks_spin);
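/* Matching note: adoption requires an exact (resource name, granted mode)
   match.  A name match with a different granted mode only sets
   found_other_mode, letting the caller report "orphan exists but with
   another mode" distinctly from a plain miss. */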
5981 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5982 uint32_t flags, uint32_t lkid, char *lvb_in)
5984 struct dlm_lkb *lkb;
5985 struct dlm_args args;
5986 struct dlm_user_args *ua;
5989 dlm_lock_recovery(ls);
5991 error = find_lkb(ls, lkid, &lkb);
5997 if (lvb_in && ua->lksb.sb_lvbptr)
5998 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5999 if (ua_tmp->castparam)
6000 ua->castparam = ua_tmp->castparam;
6001 ua->user_lksb = ua_tmp->user_lksb;
6003 error = set_unlock_args(flags, ua, &args);
6007 error = unlock_lock(ls, lkb, &args);
6009 if (error == -DLM_EUNLOCK)
6011 /* from validate_unlock_args() */
6012 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
6017 spin_lock(&ua->proc->locks_spin);
6018 /* dlm_user_add_cb() may have already taken lkb off the proc list */
6019 if (!list_empty(&lkb->lkb_ownqueue))
6020 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
6021 spin_unlock(&ua->proc->locks_spin);
6025 dlm_unlock_recovery(ls);
6030 int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6031 uint32_t flags, uint32_t lkid)
6033 struct dlm_lkb *lkb;
6034 struct dlm_args args;
6035 struct dlm_user_args *ua;
6038 dlm_lock_recovery(ls);
6040 error = find_lkb(ls, lkid, &lkb);
6045 if (ua_tmp->castparam)
6046 ua->castparam = ua_tmp->castparam;
6047 ua->user_lksb = ua_tmp->user_lksb;
6049 error = set_unlock_args(flags, ua, &args);
6053 error = cancel_lock(ls, lkb, &args);
6055 if (error == -DLM_ECANCEL)
6057 /* from validate_unlock_args() */
6058 if (error == -EBUSY)
6063 dlm_unlock_recovery(ls);
6068 int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6070 struct dlm_lkb *lkb;
6071 struct dlm_args args;
6072 struct dlm_user_args *ua;
6076 dlm_lock_recovery(ls);
6078 error = find_lkb(ls, lkid, &lkb);
6084 error = set_unlock_args(flags, ua, &args);
6088 /* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6090 r = lkb->lkb_resource;
6094 error = validate_unlock_args(lkb, &args);
6097 lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6099 error = _cancel_lock(r, lkb);
6104 if (error == -DLM_ECANCEL)
6106 /* from validate_unlock_args() */
6107 if (error == -EBUSY)
6112 dlm_unlock_recovery(ls);
6116 /* lkb's that are removed from the waiters list by revert are just left on the
6117 orphans list with the granted orphan locks, to be freed by purge */
6119 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6121 struct dlm_args args;
6124 hold_lkb(lkb); /* reference for the ls_orphans list */
6125 mutex_lock(&ls->ls_orphans_mutex);
6126 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6127 mutex_unlock(&ls->ls_orphans_mutex);
6129 set_unlock_args(0, lkb->lkb_ua, &args);
6131 error = cancel_lock(ls, lkb, &args);
6132 if (error == -DLM_ECANCEL)
6137 /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6138 granted. Regardless of what rsb queue the lock is on, it's removed and
6139 freed. The IVVALBLK flag causes the lvb on the resource to be invalidated
6140 if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
6142 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6144 struct dlm_args args;
6147 set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6148 lkb->lkb_ua, &args);
6150 error = unlock_lock(ls, lkb, &args);
6151 if (error == -DLM_EUNLOCK)
6156 /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
6157 (which does lock_rsb) due to deadlock with receiving a message that does
6158 lock_rsb followed by dlm_user_add_cb() */
6160 static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6161 struct dlm_user_proc *proc)
6163 struct dlm_lkb *lkb = NULL;
6165 mutex_lock(&ls->ls_clear_proc_locks);
6166 if (list_empty(&proc->locks))
6169 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6170 list_del_init(&lkb->lkb_ownqueue);
6172 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6173 lkb->lkb_flags |= DLM_IFL_ORPHAN;
6175 lkb->lkb_flags |= DLM_IFL_DEAD;
6177 mutex_unlock(&ls->ls_clear_proc_locks);
6181 /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
6182 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6183 which we clear here. */
6185 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
6186 list, and no more device_writes should add lkb's to proc->locks list; so we
6187 shouldn't need to take asts_spin or locks_spin here. this assumes that
6188 device reads/writes/closes are serialized -- FIXME: we may need to serialize
6191 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6193 struct dlm_lkb *lkb, *safe;
6195 dlm_lock_recovery(ls);
6198 lkb = del_proc_lock(ls, proc);
6202 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6203 orphan_proc_lock(ls, lkb);
6205 unlock_proc_lock(ls, lkb);
6207 /* this removes the reference for the proc->locks list
6208 added by dlm_user_request; it may result in the lkb
6209 being freed */
6214 mutex_lock(&ls->ls_clear_proc_locks);
6216 /* in-progress unlocks */
6217 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6218 list_del_init(&lkb->lkb_ownqueue);
6219 lkb->lkb_flags |= DLM_IFL_DEAD;
6223 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6224 memset(&lkb->lkb_callbacks, 0,
6225 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6226 list_del_init(&lkb->lkb_cb_list);
6230 mutex_unlock(&ls->ls_clear_proc_locks);
6231 dlm_unlock_recovery(ls);
6234 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6236 struct dlm_lkb *lkb, *safe;
6240 spin_lock(&proc->locks_spin);
6241 if (!list_empty(&proc->locks)) {
6242 lkb = list_entry(proc->locks.next, struct dlm_lkb,
6244 list_del_init(&lkb->lkb_ownqueue);
6246 spin_unlock(&proc->locks_spin);
6251 lkb->lkb_flags |= DLM_IFL_DEAD;
6252 unlock_proc_lock(ls, lkb);
6253 dlm_put_lkb(lkb); /* ref from proc->locks list */
6256 spin_lock(&proc->locks_spin);
6257 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6258 list_del_init(&lkb->lkb_ownqueue);
6259 lkb->lkb_flags |= DLM_IFL_DEAD;
6262 spin_unlock(&proc->locks_spin);
6264 spin_lock(&proc->asts_spin);
6265 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6266 memset(&lkb->lkb_callbacks, 0,
6267 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6268 list_del_init(&lkb->lkb_cb_list);
6271 spin_unlock(&proc->asts_spin);
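/* Contrast with dlm_clear_proc_locks() above: that path serializes against
   dlm_user_add_cb() with the ls_clear_proc_locks mutex, while this purge
   path re-takes proc->locks_spin around each lkb it removes, since it
   cannot assume exclusive access to the proc lists. */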
6274 /* pid of 0 means purge all orphans */
6276 static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6278 struct dlm_lkb *lkb, *safe;
6280 mutex_lock(&ls->ls_orphans_mutex);
6281 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6282 if (pid && lkb->lkb_ownpid != pid)
6284 unlock_proc_lock(ls, lkb);
6285 list_del_init(&lkb->lkb_ownqueue);
6288 mutex_unlock(&ls->ls_orphans_mutex);
6291 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6293 struct dlm_message *ms;
6294 struct dlm_mhandle *mh;
6297 error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6298 DLM_MSG_PURGE, &ms, &mh);
6301 ms->m_nodeid = nodeid;
6304 return send_message(mh, ms);
6307 int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6308 int nodeid, int pid)
6312 if (nodeid && (nodeid != dlm_our_nodeid())) {
6313 error = send_purge(ls, nodeid, pid);
6315 dlm_lock_recovery(ls);
6316 if (pid == current->pid)
6317 purge_proc_locks(ls, proc);
6319 do_purge(ls, nodeid, pid);
6320 dlm_unlock_recovery(ls);
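/* Usage sketch (hypothetical values): to purge orphan locks created by
   pid 1234 on node 3:

	dlm_user_purge(ls, proc, 3, 1234);

   a zero or local nodeid purges here instead of sending DLM_MSG_PURGE,
   and pid == current->pid purges the caller's own proc locks; pid 0
   purges all orphans, per do_purge() above. */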