/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;

	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}
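
/* A fresh cookie is drawn (see dlm_send_one_lockres()) when a lockres
 * holds more locks than fit in a single page and must be carried in
 * several DLM_MIG_LOCKRES_MSG packets; the shared cookie ties those
 * packets together on the receiving side. */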
static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}
/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot = 0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list)
		tot++;

	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}
/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}
/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco-%s", dlm->name);
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}
/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    one node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 */
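
/*
 * Informational sketch: in terms of the wire messages used below,
 * step 5 is dlm_request_all_locks() sending DLM_LOCK_REQUEST_MSG,
 * step 6 is each node streaming DLM_MIG_LOCKRES_MSG packets back
 * (dlm_send_one_lockres()) capped by a DLM_RECO_DATA_DONE_MSG, and
 * the ALLDONE exchange of steps 7-8 is handled by
 * dlm_send_finalize_reco_message().
 */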
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_domain_fully_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}
/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;

	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;

	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if node has been fully recovered,
 * i.e. it is no longer set in the recovery map */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;

	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_dead(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_recovered(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;

	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
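
/*
 * Example (sketch only): a top-level caller is expected to block out
 * recovery before issuing a new request, roughly:
 *
 *	dlm_wait_for_recovery(dlm);
 *	... proceed with the dlmlock()/dlmunlock() call ...
 */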
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
	       dlm->name, dlm->reco.dead_node);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
	wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	if (dlm->migrate_done) {
		mlog(0, "%s: no need to do recovery after migrating all "
		     "lock resources\n", dlm->name);
		spin_unlock(&dlm->spinlock);
		return 0;
	}

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	spin_unlock(&dlm->spinlock);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		spin_lock(&dlm->spinlock);
		__dlm_reset_recovery(dlm);
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "%s: Done requesting all lock info\n", dlm->name);

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* Set this flag on the recovery master so that a
			 * new recovery for another dead node cannot start
			 * before this one is finished; otherwise recovery
			 * may hang. */
			spin_lock(&dlm->spinlock);
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					kthread_should_stop(),
					msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
	}

	if (destroy)
		dlm_destroy_recovery_area(dlm);

	return status;
}
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num = 0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;

		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);

		num++;
	}

	return 0;
}
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;
	int status;

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, &status);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
		     "to recover dead node %u\n", dlm->name, ret,
		     request_from, dead_node);
	else
		ret = status;
	// return from here, then
	// sleep until all received or error
	return ret;
}
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
		     "to recover dead node %u\n", dlm->name, ret, send_to,
		     dead_node);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					/* Can't schedule DLM_UNLOCK_FREE_LOCK
					 * - do manually */
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	/* granted, converting and blocked are adjacent list heads in the
	 * lockres, so bumping the queue pointer walks all three in turn */
	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}
static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
		return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}
/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;

	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
	    ml->type == LKM_IVMODE &&
	    ml->convert_type == LKM_IVMODE &&
	    ml->highest_blocked == LKM_IVMODE &&
	    ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}
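
/* Note: dlm_add_dummy_lock() and dlm_is_dummy_lock() are a matched
 * pair: an all-IVMODE, zero-cookie "lock" on the blocked list can never
 * be a real lock, so it safely encodes "this node holds only a mastery
 * reference" (see the dummy-lock send in dlm_send_one_lockres()). */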
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}
/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary.
 * we really cannot afford to fail an alloc in recovery.
 * do we spin?  returning an error only delays the problem really.
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (!dlm_joined(dlm)) {
		mlog(ML_ERROR, "Domain %s not joined! "
		     "lockres %.*s, master %u\n",
		     dlm->name, mres->lockname_len,
		     mres->lockname, mres->master);
		dlm_put(dlm);
		return -EINVAL;
	}

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	hash = dlm_lockid_hash(mres->lockname, mres->lockname_len);
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
					hash);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(0, "%s: node is attempting to migrate "
			     "lockres %.*s, but marked as dropping "
			     " ref!\n", dlm->name,
			     mres->lockname_len, mres->lockname);
			ret = -EINVAL;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			dlm_lockres_put(res);
			goto leave;
		}

		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->spinlock);
				dlm_lockres_put(res);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	} else {
		spin_unlock(&dlm->spinlock);
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have at least one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);

	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
		mlog_errno(ret);
	}

	return ret;
}
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;
	u8 extra_ref;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;
	extra_ref = item->u.ml.extra_ref;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
			   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	/* See comment in dlm_mig_lockres_handler() */
	if (res) {
		if (extra_ref)
			dlm_lockres_put(res);
		dlm_lockres_put(res);
	}
	kfree(data);
}
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

resend:
	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	if (ret < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key "
		     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
		     dlm->key, nodenum);
	else if (status == -ENOMEM) {
		mlog_errno(status);
		msleep(50);
		goto resend;
	} else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}
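
/* Note: the requery reply rides in the o2net status channel: a
 * non-negative status is the owner's node number (bounded above by
 * DLM_LOCK_RES_OWNER_UNKNOWN), which is why it is masked down to a u8
 * above rather than being treated as an errno. */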
/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;
	int dispatched = 0;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(ret);
				spin_unlock(&res->spinlock);
				dlm_lockres_put(res);
				spin_unlock(&dlm->spinlock);
				dlm_put(dlm);
				/* sender will take care of this and retry */
				return ret;
			} else {
				dispatched = 1;
				__dlm_lockres_grab_inflight_worker(dlm, res);
				spin_unlock(&res->spinlock);
			}
		} else {
			/* put.. in case we are not the master */
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);
		}
	}
	spin_unlock(&dlm->spinlock);

	if (!dispatched)
		dlm_put(dlm);
	return master;
}
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;

	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	/* granted, converting and blocked are adjacent members,
	 * so index off of the granted list head */
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that he doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */
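
/*
 * Illustrative sketch only (DLM_MIGRATING is an assumed status name from
 * dlmapi.h; this is not part of the build): a request handler that loses
 * the race described above would back off roughly like so:
 *
 *	spin_lock(&res->spinlock);
 *	if (res->state & DLM_LOCK_RES_MIGRATING) {
 *		spin_unlock(&res->spinlock);
 *		return DLM_MIGRATING;	(caller spins, then rechecks master)
 *	}
 *	spin_unlock(&res->spinlock);
 */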
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue, *iter;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock;
	u8 from = O2NM_MAX_NODES;
	__be64 c;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, from);
			spin_unlock(&res->spinlock);
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			lock = NULL;
			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each(iter, tmpq) {
					lock = list_entry(iter,
						  struct dlm_lock, list);
					if (lock->ml.cookie == ml->cookie)
						break;
					lock = NULL;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				c = ml->cookie;
				mlog(ML_ERROR, "Could not find local lock "
				     "with cookie %u:%llu, node %u, "
				     "list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (lock->ml.node != ml->node) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "Mismatched node# in lock "
				     "cookie %u:%llu, name %.*s, node %u\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     res->lockname.len, res->lockname.name,
				     lock->ml.node);
				c = ml->cookie;
				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
				     "node %u, list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (tmpq != queue) {
				c = ml->cookie;
				mlog(0, "Lock cookie %u:%llu was on list %u "
				     "instead of list %u for %.*s\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     j, ml->list, res->lockname.len,
				     res->lockname.name);
				__dlm_print_one_lock_resource(res);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		/*
		 * If the lock is in the blocked list it can't have a valid lvb,
		 * so skip it
		 */
		if (ml->list == DLM_BLOCKED_LIST)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       preserved.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death.  sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		bad = 0;
		spin_lock(&res->spinlock);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));

				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
				     "node=%u, cookie=%u:%llu, queue=%d\n",
				     ml->type, ml->convert_type, ml->node,
				     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
				     ml->list);

				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			if (mres->flags & DLM_MRES_RECOVERY &&
			    ml->list == DLM_CONVERTING_LIST &&
			    newlock->ml.type >
			    newlock->ml.convert_type) {
				/* newlock is doing downconvert, add it to the
				 * head of converting list */
				list_add(&newlock->list, queue);
			} else
				list_add_tail(&newlock->list, queue);
			mlog(0, "%s:%.*s: added lock for node %u, "
			     "setting refmap bit\n", dlm->name,
			     res->lockname.len, res->lockname.name, ml->node);
			dlm_lockres_set_refmap_bit(dlm, res, ml->node);
		}
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	/* balance the ref taken when the work was queued */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	if (ret < 0)
		mlog_errno(ret);

	return ret;
}
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, is already on recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
		dlm_lockres_put(res);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry_safe(lock, next, queue, list) {
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}
/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct hlist_head *bucket;
	struct dlm_lock_resource *res, *next;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		if (res->owner == dead_node) {
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}
	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) {
				spin_lock(&res->spinlock);
				res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING;
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}

			if (!(res->state & DLM_LOCK_RES_RECOVERING))
				continue;

			if (res->owner != dead_node &&
			    res->owner != dlm->node_num)
				continue;

			if (!list_empty(&res->recovering)) {
				list_del_init(&res->recovering);
				dlm_lockres_put(res);
			}

			/* new_master has our reference from
			 * the lock state sent during recovery */
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}
}
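/* returns 1 if the lvb can no longer be trusted once this lock's owner
 * is gone.  'local' means this node holds the lockres only as a
 * secondary copy, so the lvb stays valid only if we ourselves hold
 * the lock at EX or PR. */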
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}
static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}
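/* strip all of the dead node's locks off of a lockres that this node
 * masters.  both dlm->spinlock and res->spinlock must be held. */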
static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct dlm_lock *lock, *next;
	unsigned int freed = 0;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb
	 */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* We do two dlm_lock_put(). One for removing from list and the other is
	 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_entry_safe(lock, next, &res->granted, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->converting, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->blocked, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	if (freed) {
		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
		     "dropping ref from lockres\n", dlm->name,
		     res->lockname.len, res->lockname.name, freed, dead_node);
		if (!test_bit(dead_node, res->refmap)) {
			mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
			     "but ref was not set\n", dlm->name,
			     res->lockname.len, res->lockname.name, freed, dead_node);
			__dlm_print_one_lock_resource(res);
		}
		res->state |= DLM_LOCK_RES_RECOVERY_WAITING;
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	} else if (test_bit(dead_node, res->refmap)) {
		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
		     "no locks and had not purged before dying\n", dlm->name,
		     res->lockname.len, res->lockname.name, dead_node);
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}
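/* walk the entire lockres hash on node death: prune $RECOVERY locks,
 * revalidate lvbs, and apply the two rules described below.  runs
 * under dlm->spinlock from __dlm_hb_node_down(). */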
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						/* Can't schedule
						 * DLM_UNLOCK_FREE_LOCK
						 * - do manually */
						dlm_lock_put(lock);
						break;
					}
				}

				if ((res->owner == dead_node) &&
				    (res->state & DLM_LOCK_RES_DROPPING_REF)) {
					dlm_lockres_get(res);
					__dlm_do_purge_lockres(dlm, res);
					spin_unlock(&res->spinlock);
					wake_up(&res->wq);
					dlm_lockres_put(res);
					continue;
				} else if (res->owner == dlm->node_num)
					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
					mlog(0, "%s:%.*s: owned by "
					     "dead node %u, this node was "
					     "dropping its ref when master died. "
					     "continue, purging the lockres.\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, dead_node);
					dlm_lockres_get(res);
					__dlm_do_purge_lockres(dlm, res);
					spin_unlock(&res->spinlock);
					wake_up(&res->wq);
					dlm_lockres_put(res);
					continue;
				}
				dlm_move_lockres_to_recovery_list(dlm, res);
			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
				if (test_bit(dead_node, res->refmap)) {
					mlog(0, "%s:%.*s: dead node %u had a ref, but had "
					     "no locks and had not purged before dying\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, dead_node);
					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
				}
			}
			spin_unlock(&res->spinlock);
		}
	}
}
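/* core of node-down handling, with dlm->spinlock already held by the
 * caller.  by the time this returns, the dead node has been moved out
 * of the live/domain maps and into dlm->recovery_map for the recovery
 * thread to act on. */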
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}
	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	clear_bit(idx, dlm->exit_domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	set_bit(idx, dlm->recovery_map);
}
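/* o2hb up/down callbacks, attached when the domain is set up.  each
 * one pins the dlm with dlm_grab() so the domain cannot vanish while
 * the callback runs. */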
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	/*
	 * This will notify any dlm users that a node in our domain
	 * went away without notifying us first.
	 */
	if (test_bit(idx, dlm->domain_map))
		dlm_fire_domain_eviction_callbacks(dlm, idx);

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}
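/* asts for the special $RECOVERY lock.  nothing to do here beyond
 * logging; the decisions are all made in dlm_pick_recovery_master()
 * below, based on the dlmlock() return code. */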
static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}
/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;
			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}
		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL,
					dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master. wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}
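/* notify all live nodes that recovery of dead_node is beginning.
 * dead or dying targets are skipped; a node still waiting on
 * finalize2 of the previous recovery answers -EAGAIN and is retried
 * until it is ready. */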
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(ML_NOTICE, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}

		/*
		 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
		 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
		 * We are handling both for compatibility reasons.
		 */
		if (ret == -EAGAIN || ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			msleep(100);
			goto retry;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;

			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}
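/* handler for DLM_BEGIN_RECO_MSG.  records the new recovery master and
 * the dead node, and forces local node-down cleanup if this node has
 * not yet seen the death via heartbeat. */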
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		dlm_put(dlm);
		return -EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}
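/* recovery is finalized in two stages so that every node finishes
 * remastering (finalize1) before any node clears its recovery state
 * (finalize2).  the same message is sent both times, with
 * DLM_FINALIZE_STAGE2 set in fr.flags on the second pass. */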
#define DLM_FINALIZE_STAGE2  0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
			     dlm->key, nodenum);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}
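/* handler for DLM_FINALIZE_RECO_MSG from the recovery master: stage 1
 * moves the recovered lock resources over to the new owner, stage 2
 * resets this node's recovery state and wakes the recovery thread. */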
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}
	switch (stage) {
		case 1:
			dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
			if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
				mlog(ML_ERROR, "%s: received finalize1 from "
				     "new master %u for dead node %u, but "
				     "this node has already received it!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);
			break;
		case 2:
			if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
				mlog(ML_ERROR, "%s: received finalize2 from "
				     "new master %u for dead node %u, but "
				     "this node did not have finalize1!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
			spin_unlock(&dlm->spinlock);
			dlm_kick_recovery_thread(dlm);
			break;
		default:
			BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}