/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

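/* Hand out a cluster-unique cookie for multi-message lockres
 * transfers.  Zero is deliberately never returned: callers use a
 * zero cookie for the common single-message case (see
 * dlm_send_one_lockres()), so the counter wraps from ~0 back to 1. */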
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot=0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear. just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}


/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 *
 */
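
/* In terms of the messages used below, one pass of the above looks
 * roughly like this (recovery master on the left):
 *
 *	DLM_LOCK_REQUEST_MSG    ->  each live node (dlm_request_all_locks)
 *	DLM_MIG_LOCKRES_MSG     <-  each node, one lockres per message
 *	DLM_RECO_DATA_DONE_MSG  <-  each node once it has sent everything
 *	finalize message        ->  everyone (dlm_send_finalize_reco_message)
 */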

static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_domain_fully_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if node has been recovered,
 * i.e. it is no longer set in the recovery map */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}


void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_dead(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_recovered(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
	       dlm->name, dlm->reco.dead_node);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
	wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}

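/* One pass of the recovery state machine: pick a dead node out of
 * the recovery map, mark the domain as actively recovering, then
 * either master the recovery here (master_here) or wait for whichever
 * node won dlm_pick_recovery_master() to drive it.  Returns -EAGAIN
 * when the caller should recheck immediately for more dead nodes. */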
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover! sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone. go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success! see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		spin_lock(&dlm->spinlock);
		__dlm_reset_recovery(dlm);
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

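/* Runs only on the recovery master.  Builds the per-node recovery
 * area, asks every other live node for its locks on the dead node,
 * then polls the node data list until every node reaches DONE (or
 * dies), and finally broadcasts the finalize message and kicks the
 * dlm thread to rescan anything marked dirty along the way. */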
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master. there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine. don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "%s: Done requesting all lock info\n", dlm->name);

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* Set this flag on the recovery master so that a
			 * new recovery for another dead node cannot start
			 * before this one is finished; otherwise recovery
			 * may hang. */
			spin_lock(&dlm->spinlock);
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	return status;
}

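/* Allocate one dlm_reco_node_data per node in a snapshot of the
 * current domain map.  A node that dies after the snapshot is taken
 * is caught later by the DEAD-state handling in dlm_remaster_locks(). */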
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

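/* Ask request_from to send us everything it knows about the dead
 * node's locks.  The lock data does not come back in the reply; it
 * arrives asynchronously as DLM_MIG_LOCKRES_MSG messages handled by
 * dlm_mig_lockres_handler(), followed by a DATA DONE message. */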
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	enum dlm_status ret;

	mlog(0, "\n");


	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = DLM_NOLOCKMGR;
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
		     "to recover dead node %u\n", dlm->name, ret,
		     request_from, dead_node);
	// return from here, then
	// sleep until all received or error
	return ret;

}

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

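/* Worker side of the lock request, run in dlm_worker context on the
 * node that received DLM_LOCK_REQUEST_MSG.  The page allocated by the
 * handler serves as the dlm_migratable_lockres scratch buffer: each
 * matching lockres is streamed to the recovery master one at a time,
 * then the all-done message is sent and the page is freed. */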
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died. if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
		     "to recover dead node %u\n", dlm->name, ret, send_to,
		     dead_node);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}


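/* Runs on the recovery master when a node reports that it has sent
 * all of its lock data for the dead node: mark that node's entry
 * DONE and kick the recovery thread so it can recheck the list. */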
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

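/* Pull onto the private list the lockres entries this node must send
 * to the recovery master: those owned by the dead node, plus any with
 * an UNKNOWN owner (safe only because the whole cluster recovers one
 * node at a time).  $RECOVERY lockres entries are instead pruned of
 * the dead node's granted lock and left in place. */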
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

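/* Count the locks on all three queues.  Like
 * dlm_list_num_to_pointer() further down, this relies on the granted,
 * converting and blocked list_heads being laid out consecutively in
 * struct dlm_lock_resource, so that queue++ steps to the next queue. */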
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}

static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
		return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}

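/* A lockres with no locks still needs its mastery reference carried
 * over, so it is encoded as a placeholder: an all-IVMODE lock with a
 * zero cookie on the blocked list.  dlm_is_dummy_lock() below is the
 * matching decoder on the receiving side. */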
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
	    ml->type == LKM_IVMODE &&
	    ml->convert_type == LKM_IVMODE &&
	    ml->highest_blocked == LKM_IVMODE &&
	    ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}

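/* Pack every lock on the three queues into mres (one page worth at a
 * time), flushing a full page to the target via
 * dlm_send_mig_lockres_msg().  A lockres with no locks at all gets a
 * dummy lock instead, so the target still learns of the mastery
 * reference held here. */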
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh. lockres has %d locks. this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}



/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag. all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already... just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. Taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have at least one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res;  /* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);

	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
		mlog_errno(ret);
	}

	return ret;
}


static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;
	u8 extra_ref;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;
	extra_ref = item->u.ml.extra_ref;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed. "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
	    (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	/* See comment in dlm_mig_lockres_handler() */
	if (res) {
		if (extra_ref)
			dlm_lockres_put(res);
		dlm_lockres_put(res);
	}
	kfree(data);
}



static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}


dlm_do_master_requery(struct dlm_ctxt * dlm,struct dlm_lock_resource * res,u8 nodenum,u8 * real_master)1649 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1650 u8 nodenum, u8 *real_master)
1651 {
1652 int ret = -EINVAL;
1653 struct dlm_master_requery req;
1654 int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1655
1656 memset(&req, 0, sizeof(req));
1657 req.node_idx = dlm->node_num;
1658 req.namelen = res->lockname.len;
1659 memcpy(req.name, res->lockname.name, res->lockname.len);
1660
1661 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1662 &req, sizeof(req), nodenum, &status);
1663 /* XXX: negative status not handled properly here. */
1664 if (ret < 0)
1665 mlog(ML_ERROR, "Error %d when sending message %u (key "
1666 "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
1667 dlm->key, nodenum);
1668 else {
1669 BUG_ON(status < 0);
1670 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1671 *real_master = (u8) (status & 0xff);
1672 mlog(0, "node %u responded to master requery with %u\n",
1673 nodenum, *real_master);
1674 ret = 0;
1675 }
1676 return ret;
1677 }


/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(ret);
				/* retry!? */
				BUG();
			}
		} else /* put.. in case we are not the master */
			dlm_lockres_put(res);
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}

static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
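
/*
 * The pointer arithmetic above relies on granted, converting and blocked
 * being declared as consecutive struct list_head members of
 * struct dlm_lock_resource, indexed 0, 1 and 2.  A sketch of the
 * equivalent explicit mapping, assuming the list index constants from
 * dlmcommon.h:
 *
 *	switch (list_num) {
 *	case DLM_GRANTED_LIST:    return &res->granted;
 *	case DLM_CONVERTING_LIST: return &res->converting;
 *	case DLM_BLOCKED_LIST:    return &res->blocked;
 *	}
 */
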
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTs.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that it doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */
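
/*
 * Illustrative only: the "spin and wait" a caller performs when it races
 * with MIGRATING amounts to roughly the following sketch (using the
 * lockres wait queue and state flags seen elsewhere in this file):
 *
 *	spin_lock(&res->spinlock);
 *	while (res->state & DLM_LOCK_RES_MIGRATING) {
 *		spin_unlock(&res->spinlock);
 *		wait_event(res->wq,
 *			   !(res->state & DLM_LOCK_RES_MIGRATING));
 *		spin_lock(&res->spinlock);
 *	}
 *	(recheck res->owner here, then resend the request)
 *	spin_unlock(&res->spinlock);
 */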

static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue, *iter;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;
	__be64 c;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, from);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			lock = NULL;
			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each(iter, tmpq) {
					lock = list_entry(iter,
						  struct dlm_lock, list);
					if (lock->ml.cookie == ml->cookie)
						break;
					lock = NULL;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				c = ml->cookie;
				mlog(ML_ERROR, "Could not find local lock "
					       "with cookie %u:%llu, node %u, "
					       "list %u, flags 0x%x, type %d, "
					       "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (lock->ml.node != ml->node) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "Mismatched node# in lock "
				     "cookie %u:%llu, name %.*s, node %u\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     res->lockname.len, res->lockname.name,
				     lock->ml.node);
				c = ml->cookie;
				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
				     "node %u, list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (tmpq != queue) {
				c = ml->cookie;
				mlog(0, "Lock cookie %u:%llu was on list %u "
				     "instead of list %u for %.*s\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     j, ml->list, res->lockname.len,
				     res->lockname.name);
				__dlm_print_one_lock_resource(res);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);
			added++;

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *   1. order of locks on granted queue is
		 *      meaningless.
		 *   2. order of locks on converting queue is
		 *      LOST with the node death.  sorry charlie.
		 *   3. order of locks on the blocked queue is
		 *      also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		bad = 0;
		spin_lock(&res->spinlock);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));

				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
				     "node=%u, cookie=%u:%llu, queue=%d\n",
				     ml->type, ml->convert_type, ml->node,
				     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
				     ml->list);

				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			list_add_tail(&newlock->list, queue);
			mlog(0, "%s:%.*s: added lock for node %u, "
			     "setting refmap bit\n", dlm->name,
			     res->lockname.len, res->lockname.name, ml->node);
			dlm_lockres_set_refmap_bit(dlm, res, ml->node);
			added++;
		}
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	/* balance the ref taken when the work was queued */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	return ret;
}
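
/*
 * For reference when reading the cookie mlogs above: a lock cookie is a
 * u64 whose top byte is the owning node number and whose low 56 bits are
 * a per-node sequence number.  A sketch of the decode, assuming that
 * layout (the real helpers live in dlmcommon.h):
 *
 *	u8  node = (u8)(be64_to_cpu(c) >> 56);
 *	u64 seq  = be64_to_cpu(c) & 0x00ffffffffffffffULL;
 */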

void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, it is already on the recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
		dlm_lockres_put(res);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry_safe(lock, next, queue, list) {
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from granted list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}
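
/*
 * Summary of the pending-operation dispositions above:
 *
 *	convert_pending  (converting list) -> revert to the granted mode
 *	lock_pending     (blocked list)    -> drop the request entirely
 *	unlock_pending   (granted list)    -> treat the unlock as completed
 *	cancel_pending   (converting list) -> treat the cancel as completed
 */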



/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct hlist_node *hash_iter;
	struct hlist_head *bucket;
	struct dlm_lock_resource *res, *next;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		if (res->owner == dead_node) {
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
			if (!(res->state & DLM_LOCK_RES_RECOVERING))
				continue;

			if (res->owner != dead_node &&
			    res->owner != dlm->node_num)
				continue;

			if (!list_empty(&res->recovering)) {
				list_del_init(&res->recovering);
				dlm_lockres_put(res);
			}

			/* new_master has our reference from
			 * the lock state sent during recovery */
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}
}

static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}
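
/*
 * The same checks in table form:
 *
 *	locks searched     lock mode   lvb invalidated?
 *	local (secondary)  EX or PR    no, a trustworthy copy is still held
 *	local (secondary)  other       yes
 *	dead node (master) EX          yes, the dead node may have changed it
 *	dead node (master) other       no
 */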

static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when it died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}

static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct dlm_lock *lock, *next;
	unsigned int freed = 0;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when it died, blank out the lvb
	 */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* We do two dlm_lock_put()s: one to drop the ref taken by the list
	 * and the other to force the DLM_UNLOCK_FREE_LOCK action so the
	 * locks are actually freed */

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_entry_safe(lock, next, &res->granted, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->converting, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->blocked, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}

	if (freed) {
		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
		     "dropping ref from lockres\n", dlm->name,
		     res->lockname.len, res->lockname.name, freed, dead_node);
		if (!test_bit(dead_node, res->refmap)) {
			mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
			     "but ref was not set\n", dlm->name,
			     res->lockname.len, res->lockname.name, freed, dead_node);
			__dlm_print_one_lock_resource(res);
		}
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	} else if (test_bit(dead_node, res->refmap)) {
		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
		     "no locks and had not purged before dying\n", dlm->name,
		     res->lockname.len, res->lockname.name, dead_node);
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}
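
/*
 * The three loops above are intentionally identical.  A factored sketch,
 * using a hypothetical helper (illustrative, not part of this file):
 *
 *	static unsigned int dlm_purge_dead_locks_on(struct list_head *queue,
 *						    u8 dead_node)
 *	{
 *		struct dlm_lock *lock, *next;
 *		unsigned int freed = 0;
 *
 *		list_for_each_entry_safe(lock, next, queue, list) {
 *			if (lock->ml.node != dead_node)
 *				continue;
 *			list_del_init(&lock->list);
 *			dlm_lock_put(lock);  (list ref)
 *			dlm_lock_put(lock);  (DLM_UNLOCK_FREE_LOCK ref)
 *			freed++;
 *		}
 *		return freed;
 *	}
 *
 *	freed += dlm_purge_dead_locks_on(&res->granted, dead_node);
 *	freed += dlm_purge_dead_locks_on(&res->converting, dead_node);
 *	freed += dlm_purge_dead_locks_on(&res->blocked, dead_node);
 */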

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 */


static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct hlist_node *iter;
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;


	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, iter, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
					mlog(ML_NOTICE, "%s: res %.*s, Skip "
					     "recovery as it is being freed\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name);
				} else
					dlm_move_lockres_to_recovery_list(dlm,
									  res);

			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}

}

static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	clear_bit(idx, dlm->exit_domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	/*
	 * This will notify any dlm users that a node in our domain
	 * went away without notifying us first.
	 */
	if (test_bit(idx, dlm->domain_map))
		dlm_fire_domain_eviction_callbacks(dlm, idx);

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck.  this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master.  wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking a while\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}
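
/*
 * Return values of dlm_pick_recovery_master, as implemented above:
 *
 *	0        this node took the $RECOVERY EX and sent begin_reco;
 *	         it is now the recovery master
 *	-EEXIST  another node is (or just became) the recovery master
 *	-EINVAL  the dead node was already recovered while racing for
 *	         the lock; nothing left to do
 */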

static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(ML_NOTICE, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}

		/*
		 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
		 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
		 * We are handling both for compatibility reasons.
		 */
		if (ret == -EAGAIN || ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			msleep(100);
			goto retry;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;

			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}

int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		/* drop the ref taken by dlm_grab above before bailing */
		dlm_put(dlm);
		return -EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}

#define DLM_FINALIZE_STAGE2  0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
			     dlm->key, nodenum);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}
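
/*
 * Why finalize happens in two stages: stage one makes every node apply
 * the new ownership (dlm_finish_local_lockres_recovery) and set
 * DLM_RECO_STATE_FINALIZE; while that flag is set, a node answers any
 * new begin_reco with -EAGAIN (see dlm_begin_reco_handler above).  Only
 * after every node has acked stage one does the recovery master send
 * stage two, which clears the flag and resets the recovery state.  This
 * barrier keeps a node that is still finishing the old recovery from
 * being drafted into a new one with stale reco.dead_node or
 * reco.new_master values.
 */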

int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	switch (stage) {
	case 1:
		dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			mlog(ML_ERROR, "%s: received finalize1 from "
			     "new master %u for dead node %u, but "
			     "this node has already received it!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		break;
	case 2:
		if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
			mlog(ML_ERROR, "%s: received finalize2 from "
			     "new master %u for dead node %u, but "
			     "this node did not have finalize1!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		__dlm_reset_recovery(dlm);
		spin_unlock(&dlm->spinlock);
		dlm_kick_recovery_thread(dlm);
		break;
	default:
		BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}
