Commit | Line | Data |
---|---|---|
6714d8e8 KH |
1 | /* -*- mode: c; c-basic-offset: 8; -*- |
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | |
3 | * | |
4 | * dlmmod.c | |
5 | * | |
6 | * standalone DLM module | |
7 | * | |
8 | * Copyright (C) 2004 Oracle. All rights reserved. | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or | |
11 | * modify it under the terms of the GNU General Public | |
12 | * License as published by the Free Software Foundation; either | |
13 | * version 2 of the License, or (at your option) any later version. | |
14 | * | |
15 | * This program is distributed in the hope that it will be useful, | |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
18 | * General Public License for more details. | |
19 | * | |
20 | * You should have received a copy of the GNU General Public | |
21 | * License along with this program; if not, write to the | |
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
23 | * Boston, MA 02111-1307, USA. | |
24 | * | |
25 | */ | |
26 | ||
27 | ||
28 | #include <linux/module.h> | |
29 | #include <linux/fs.h> | |
30 | #include <linux/types.h> | |
31 | #include <linux/slab.h> | |
32 | #include <linux/highmem.h> | |
6714d8e8 KH |
33 | #include <linux/init.h> |
34 | #include <linux/sysctl.h> | |
35 | #include <linux/random.h> | |
36 | #include <linux/blkdev.h> | |
37 | #include <linux/socket.h> | |
38 | #include <linux/inet.h> | |
39 | #include <linux/spinlock.h> | |
40 | #include <linux/delay.h> | |
41 | ||
42 | ||
43 | #include "cluster/heartbeat.h" | |
44 | #include "cluster/nodemanager.h" | |
45 | #include "cluster/tcp.h" | |
46 | ||
47 | #include "dlmapi.h" | |
48 | #include "dlmcommon.h" | |
82353b59 | 49 | #include "dlmdomain.h" |
e5a0334c | 50 | #include "dlmdebug.h" |
6714d8e8 KH |
51 | |
52 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) | |
53 | #include "cluster/masklog.h" | |
54 | ||
6714d8e8 KH |
55 | static void dlm_mle_node_down(struct dlm_ctxt *dlm, |
56 | struct dlm_master_list_entry *mle, | |
57 | struct o2nm_node *node, | |
58 | int idx); | |
59 | static void dlm_mle_node_up(struct dlm_ctxt *dlm, | |
60 | struct dlm_master_list_entry *mle, | |
61 | struct o2nm_node *node, | |
62 | int idx); | |
63 | ||
64 | static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); | |
ba2bf218 KH |
65 | static int dlm_do_assert_master(struct dlm_ctxt *dlm, |
66 | struct dlm_lock_resource *res, | |
67 | void *nodemap, u32 flags); | |
f3f85464 | 68 | static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data); |
6714d8e8 KH |
69 | |
70 | static inline int dlm_mle_equal(struct dlm_ctxt *dlm, | |
71 | struct dlm_master_list_entry *mle, | |
72 | const char *name, | |
73 | unsigned int namelen) | |
74 | { | |
6714d8e8 KH |
75 | if (dlm != mle->dlm) |
76 | return 0; | |
77 | ||
7141514b SM |
78 | if (namelen != mle->mnamelen || |
79 | memcmp(name, mle->mname, namelen) != 0) | |
f77a9a78 SM |
80 | return 0; |
81 | ||
6714d8e8 KH |
82 | return 1; |
83 | } | |
84 | ||
724bdca9 SM |
85 | static struct kmem_cache *dlm_lockres_cache = NULL; |
86 | static struct kmem_cache *dlm_lockname_cache = NULL; | |
e18b890b | 87 | static struct kmem_cache *dlm_mle_cache = NULL; |
6714d8e8 | 88 | |
6714d8e8 KH |
89 | static void dlm_mle_release(struct kref *kref); |
90 | static void dlm_init_mle(struct dlm_master_list_entry *mle, | |
91 | enum dlm_mle_type type, | |
92 | struct dlm_ctxt *dlm, | |
93 | struct dlm_lock_resource *res, | |
94 | const char *name, | |
95 | unsigned int namelen); | |
96 | static void dlm_put_mle(struct dlm_master_list_entry *mle); | |
97 | static void __dlm_put_mle(struct dlm_master_list_entry *mle); | |
98 | static int dlm_find_mle(struct dlm_ctxt *dlm, | |
99 | struct dlm_master_list_entry **mle, | |
100 | char *name, unsigned int namelen); | |
101 | ||
ba2bf218 KH |
102 | static int dlm_do_master_request(struct dlm_lock_resource *res, |
103 | struct dlm_master_list_entry *mle, int to); | |
6714d8e8 KH |
104 | |
105 | ||
106 | static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, | |
107 | struct dlm_lock_resource *res, | |
108 | struct dlm_master_list_entry *mle, | |
109 | int *blocked); | |
110 | static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, | |
111 | struct dlm_lock_resource *res, | |
112 | struct dlm_master_list_entry *mle, | |
113 | int blocked); | |
114 | static int dlm_add_migration_mle(struct dlm_ctxt *dlm, | |
115 | struct dlm_lock_resource *res, | |
116 | struct dlm_master_list_entry *mle, | |
117 | struct dlm_master_list_entry **oldmle, | |
118 | const char *name, unsigned int namelen, | |
119 | u8 new_master, u8 master); | |
120 | ||
121 | static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, | |
122 | struct dlm_lock_resource *res); | |
123 | static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | |
124 | struct dlm_lock_resource *res); | |
125 | static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, | |
126 | struct dlm_lock_resource *res, | |
127 | u8 target); | |
c03872f5 KH |
128 | static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, |
129 | struct dlm_lock_resource *res); | |
6714d8e8 KH |
130 | |
131 | ||
132 | int dlm_is_host_down(int errno) | |
133 | { | |
134 | switch (errno) { | |
135 | case -EBADF: | |
136 | case -ECONNREFUSED: | |
137 | case -ENOTCONN: | |
138 | case -ECONNRESET: | |
139 | case -EPIPE: | |
140 | case -EHOSTDOWN: | |
141 | case -EHOSTUNREACH: | |
142 | case -ETIMEDOUT: | |
143 | case -ECONNABORTED: | |
144 | case -ENETDOWN: | |
145 | case -ENETUNREACH: | |
146 | case -ENETRESET: | |
147 | case -ESHUTDOWN: | |
148 | case -ENOPROTOOPT: | |
149 | case -EINVAL: /* if returned from our tcp code, | |
150 | this means there is no socket */ | |
151 | return 1; | |
152 | } | |
153 | return 0; | |
154 | } | |
155 | ||
156 | ||
157 | /* | |
158 | * MASTER LIST FUNCTIONS | |
159 | */ | |
160 | ||
161 | ||
162 | /* | |
163 | * regarding master list entries and heartbeat callbacks: | |
164 | * | |
165 | * in order to avoid sleeping and allocation that occurs in | |
166 | * heartbeat, master list entries are simply attached to the | |
167 | * dlm's established heartbeat callbacks. the mle is attached | |
168 | * when it is created, and since the dlm->spinlock is held at | |
169 | * that time, any heartbeat event will be properly discovered | |
170 | * by the mle. the mle needs to be detached from the | |
171 | * dlm->mle_hb_events list as soon as heartbeat events are no | |
172 | * longer useful to the mle, and before the mle is freed. | |
173 | * | |
174 | * as a general rule, heartbeat events are no longer needed by | |
175 | * the mle once an "answer" regarding the lock master has been | |
176 | * received. | |
177 | */ | |
178 | static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, | |
179 | struct dlm_master_list_entry *mle) | |
180 | { | |
181 | assert_spin_locked(&dlm->spinlock); | |
182 | ||
183 | list_add_tail(&mle->hb_events, &dlm->mle_hb_events); | |
184 | } | |
185 | ||
186 | ||
187 | static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, | |
188 | struct dlm_master_list_entry *mle) | |
189 | { | |
190 | if (!list_empty(&mle->hb_events)) | |
191 | list_del_init(&mle->hb_events); | |
192 | } | |
193 | ||
194 | ||
195 | static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, | |
196 | struct dlm_master_list_entry *mle) | |
197 | { | |
198 | spin_lock(&dlm->spinlock); | |
199 | __dlm_mle_detach_hb_events(dlm, mle); | |
200 | spin_unlock(&dlm->spinlock); | |
201 | } | |
202 | ||
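/*
 * The inuse count tracks callers that need an mle pinned across a
 * longer operation.  dlm_get_mle_inuse() bumps the count and takes an
 * extra kref (both dlm->spinlock and dlm->master_lock must be held);
 * dlm_put_mle_inuse() reacquires those locks, drops the count and
 * releases the reference via __dlm_put_mle().
 */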
a2bf0477 KH |
203 | static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) |
204 | { | |
205 | struct dlm_ctxt *dlm; | |
206 | dlm = mle->dlm; | |
207 | ||
208 | assert_spin_locked(&dlm->spinlock); | |
209 | assert_spin_locked(&dlm->master_lock); | |
210 | mle->inuse++; | |
211 | kref_get(&mle->mle_refs); | |
212 | } | |
213 | ||
214 | static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) | |
215 | { | |
216 | struct dlm_ctxt *dlm; | |
217 | dlm = mle->dlm; | |
218 | ||
219 | spin_lock(&dlm->spinlock); | |
220 | spin_lock(&dlm->master_lock); | |
221 | mle->inuse--; | |
222 | __dlm_put_mle(mle); | |
223 | spin_unlock(&dlm->master_lock); | |
224 | spin_unlock(&dlm->spinlock); | |
225 | ||
226 | } | |
227 | ||
6714d8e8 KH |
228 | /* remove from list and free */ |
229 | static void __dlm_put_mle(struct dlm_master_list_entry *mle) | |
230 | { | |
231 | struct dlm_ctxt *dlm; | |
232 | dlm = mle->dlm; | |
233 | ||
234 | assert_spin_locked(&dlm->spinlock); | |
235 | assert_spin_locked(&dlm->master_lock); | |
aa852354 KH |
236 | if (!atomic_read(&mle->mle_refs.refcount)) { |
237 | /* this may or may not crash, but who cares. | |
238 | * it's a BUG. */ | |
239 | mlog(ML_ERROR, "bad mle: %p\n", mle); | |
240 | dlm_print_one_mle(mle); | |
241 | BUG(); | |
242 | } else | |
243 | kref_put(&mle->mle_refs, dlm_mle_release); | |
6714d8e8 KH |
244 | } |
245 | ||
246 | ||
247 | /* must not have any spinlocks coming in */ | |
248 | static void dlm_put_mle(struct dlm_master_list_entry *mle) | |
249 | { | |
250 | struct dlm_ctxt *dlm; | |
251 | dlm = mle->dlm; | |
252 | ||
253 | spin_lock(&dlm->spinlock); | |
254 | spin_lock(&dlm->master_lock); | |
255 | __dlm_put_mle(mle); | |
256 | spin_unlock(&dlm->master_lock); | |
257 | spin_unlock(&dlm->spinlock); | |
258 | } | |
259 | ||
260 | static inline void dlm_get_mle(struct dlm_master_list_entry *mle) | |
261 | { | |
262 | kref_get(&mle->mle_refs); | |
263 | } | |
264 | ||
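/*
 * dlm_init_mle() fills in a freshly allocated mle: for DLM_MLE_MASTER
 * the name, length and hash come from the lock resource, otherwise from
 * the name passed in.  The domain map is copied into both node_map and
 * vote_map (minus this node), and the mle is attached to the domain
 * node up/down events; dlm->spinlock must be held by the caller.
 */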
265 | static void dlm_init_mle(struct dlm_master_list_entry *mle, | |
266 | enum dlm_mle_type type, | |
267 | struct dlm_ctxt *dlm, | |
268 | struct dlm_lock_resource *res, | |
269 | const char *name, | |
270 | unsigned int namelen) | |
271 | { | |
272 | assert_spin_locked(&dlm->spinlock); | |
273 | ||
274 | mle->dlm = dlm; | |
275 | mle->type = type; | |
2ed6c750 | 276 | INIT_HLIST_NODE(&mle->master_hash_node); |
6714d8e8 KH |
277 | INIT_LIST_HEAD(&mle->hb_events); |
278 | memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); | |
279 | spin_lock_init(&mle->spinlock); | |
280 | init_waitqueue_head(&mle->wq); | |
281 | atomic_set(&mle->woken, 0); | |
282 | kref_init(&mle->mle_refs); | |
283 | memset(mle->response_map, 0, sizeof(mle->response_map)); | |
284 | mle->master = O2NM_MAX_NODES; | |
285 | mle->new_master = O2NM_MAX_NODES; | |
a2bf0477 | 286 | mle->inuse = 0; |
6714d8e8 | 287 | |
f77a9a78 SM |
288 | BUG_ON(mle->type != DLM_MLE_BLOCK && |
289 | mle->type != DLM_MLE_MASTER && | |
290 | mle->type != DLM_MLE_MIGRATION); | |
291 | ||
6714d8e8 KH |
292 | if (mle->type == DLM_MLE_MASTER) { |
293 | BUG_ON(!res); | |
7141514b SM |
294 | mle->mleres = res; |
295 | memcpy(mle->mname, res->lockname.name, res->lockname.len); | |
296 | mle->mnamelen = res->lockname.len; | |
297 | mle->mnamehash = res->lockname.hash; | |
f77a9a78 | 298 | } else { |
6714d8e8 | 299 | BUG_ON(!name); |
7141514b SM |
300 | mle->mleres = NULL; |
301 | memcpy(mle->mname, name, namelen); | |
302 | mle->mnamelen = namelen; | |
303 | mle->mnamehash = dlm_lockid_hash(name, namelen); | |
6714d8e8 KH |
304 | } |
305 | ||
2041d8fd SM |
306 | atomic_inc(&dlm->mle_tot_count[mle->type]); |
307 | atomic_inc(&dlm->mle_cur_count[mle->type]); | |
308 | ||
6714d8e8 KH |
309 | /* copy off the node_map and register hb callbacks on our copy */ |
310 | memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); | |
311 | memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); | |
312 | clear_bit(dlm->node_num, mle->vote_map); | |
313 | clear_bit(dlm->node_num, mle->node_map); | |
314 | ||
315 | /* attach the mle to the domain node up/down events */ | |
316 | __dlm_mle_attach_hb_events(dlm, mle); | |
317 | } | |
318 | ||
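/*
 * mles live in a per-domain hash table keyed by the lockid hash
 * (mle->mnamehash).  __dlm_insert_mle() requires dlm->master_lock;
 * __dlm_unlink_mle() additionally asserts dlm->spinlock and is a no-op
 * if the entry is already unhashed.
 */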
1c084577 SM |
319 | void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) |
320 | { | |
321 | assert_spin_locked(&dlm->spinlock); | |
322 | assert_spin_locked(&dlm->master_lock); | |
323 | ||
2ed6c750 SM |
324 | if (!hlist_unhashed(&mle->master_hash_node)) |
325 | hlist_del_init(&mle->master_hash_node); | |
1c084577 SM |
326 | } |
327 | ||
328 | void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) | |
329 | { | |
2ed6c750 | 330 | struct hlist_head *bucket; |
2ed6c750 | 331 | |
1c084577 SM |
332 | assert_spin_locked(&dlm->master_lock); |
333 | ||
7141514b | 334 | bucket = dlm_master_hash(dlm, mle->mnamehash); |
2ed6c750 | 335 | hlist_add_head(&mle->master_hash_node, bucket); |
1c084577 | 336 | } |
6714d8e8 KH |
337 | |
338 | /* returns 1 if found, 0 if not */ | |
339 | static int dlm_find_mle(struct dlm_ctxt *dlm, | |
340 | struct dlm_master_list_entry **mle, | |
341 | char *name, unsigned int namelen) | |
342 | { | |
343 | struct dlm_master_list_entry *tmpmle; | |
2ed6c750 SM |
344 | struct hlist_head *bucket; |
345 | struct hlist_node *list; | |
346 | unsigned int hash; | |
6714d8e8 KH |
347 | |
348 | assert_spin_locked(&dlm->master_lock); | |
349 | ||
2ed6c750 SM |
350 | hash = dlm_lockid_hash(name, namelen); |
351 | bucket = dlm_master_hash(dlm, hash); | |
352 | hlist_for_each(list, bucket) { | |
353 | tmpmle = hlist_entry(list, struct dlm_master_list_entry, | |
354 | master_hash_node); | |
6714d8e8 KH |
355 | if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) |
356 | continue; | |
357 | dlm_get_mle(tmpmle); | |
358 | *mle = tmpmle; | |
359 | return 1; | |
360 | } | |
361 | return 0; | |
362 | } | |
363 | ||
364 | void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) | |
365 | { | |
366 | struct dlm_master_list_entry *mle; | |
6714d8e8 KH |
367 | |
368 | assert_spin_locked(&dlm->spinlock); | |
2bd63216 | 369 | |
800deef3 | 370 | list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { |
6714d8e8 KH |
371 | if (node_up) |
372 | dlm_mle_node_up(dlm, mle, NULL, idx); | |
373 | else | |
374 | dlm_mle_node_down(dlm, mle, NULL, idx); | |
375 | } | |
376 | } | |
377 | ||
378 | static void dlm_mle_node_down(struct dlm_ctxt *dlm, | |
379 | struct dlm_master_list_entry *mle, | |
380 | struct o2nm_node *node, int idx) | |
381 | { | |
382 | spin_lock(&mle->spinlock); | |
383 | ||
384 | if (!test_bit(idx, mle->node_map)) | |
385 | mlog(0, "node %u already removed from nodemap!\n", idx); | |
386 | else | |
387 | clear_bit(idx, mle->node_map); | |
388 | ||
389 | spin_unlock(&mle->spinlock); | |
390 | } | |
391 | ||
392 | static void dlm_mle_node_up(struct dlm_ctxt *dlm, | |
393 | struct dlm_master_list_entry *mle, | |
394 | struct o2nm_node *node, int idx) | |
395 | { | |
396 | spin_lock(&mle->spinlock); | |
397 | ||
398 | if (test_bit(idx, mle->node_map)) | |
399 | mlog(0, "node %u already in node map!\n", idx); | |
400 | else | |
401 | set_bit(idx, mle->node_map); | |
402 | ||
403 | spin_unlock(&mle->spinlock); | |
404 | } | |
405 | ||
406 | ||
407 | int dlm_init_mle_cache(void) | |
408 | { | |
12eb0035 | 409 | dlm_mle_cache = kmem_cache_create("o2dlm_mle", |
6714d8e8 KH |
410 | sizeof(struct dlm_master_list_entry), |
411 | 0, SLAB_HWCACHE_ALIGN, | |
20c2df83 | 412 | NULL); |
6714d8e8 KH |
413 | if (dlm_mle_cache == NULL) |
414 | return -ENOMEM; | |
415 | return 0; | |
416 | } | |
417 | ||
418 | void dlm_destroy_mle_cache(void) | |
419 | { | |
420 | if (dlm_mle_cache) | |
421 | kmem_cache_destroy(dlm_mle_cache); | |
422 | } | |
423 | ||
424 | static void dlm_mle_release(struct kref *kref) | |
425 | { | |
426 | struct dlm_master_list_entry *mle; | |
427 | struct dlm_ctxt *dlm; | |
428 | ||
6714d8e8 KH |
429 | mle = container_of(kref, struct dlm_master_list_entry, mle_refs); |
430 | dlm = mle->dlm; | |
431 | ||
6714d8e8 KH |
432 | assert_spin_locked(&dlm->spinlock); |
433 | assert_spin_locked(&dlm->master_lock); | |
434 | ||
7141514b SM |
435 | mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, |
436 | mle->type); | |
2ed6c750 | 437 | |
6714d8e8 | 438 | /* remove from list if not already */ |
1c084577 | 439 | __dlm_unlink_mle(dlm, mle); |
6714d8e8 KH |
440 | |
441 | /* detach the mle from the domain node up/down events */ | |
442 | __dlm_mle_detach_hb_events(dlm, mle); | |
443 | ||
2041d8fd SM |
444 | atomic_dec(&dlm->mle_cur_count[mle->type]); |
445 | ||
6714d8e8 KH |
446 | /* NOTE: kfree under spinlock here. |
447 | * if this is bad, we can move this to a freelist. */ | |
448 | kmem_cache_free(dlm_mle_cache, mle); | |
449 | } | |
450 | ||
451 | ||
452 | /* | |
453 | * LOCK RESOURCE FUNCTIONS | |
454 | */ | |
455 | ||
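/*
 * Two slab caches back the lock resources: one for struct
 * dlm_lock_resource itself and one sized DLM_LOCKID_NAME_MAX for the
 * lock names.  If either creation fails, dlm_destroy_master_caches()
 * tears down whatever was created and -ENOMEM is returned.
 */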
724bdca9 SM |
456 | int dlm_init_master_caches(void) |
457 | { | |
458 | dlm_lockres_cache = kmem_cache_create("o2dlm_lockres", | |
459 | sizeof(struct dlm_lock_resource), | |
460 | 0, SLAB_HWCACHE_ALIGN, NULL); | |
461 | if (!dlm_lockres_cache) | |
462 | goto bail; | |
463 | ||
464 | dlm_lockname_cache = kmem_cache_create("o2dlm_lockname", | |
465 | DLM_LOCKID_NAME_MAX, 0, | |
466 | SLAB_HWCACHE_ALIGN, NULL); | |
467 | if (!dlm_lockname_cache) | |
468 | goto bail; | |
469 | ||
470 | return 0; | |
471 | bail: | |
472 | dlm_destroy_master_caches(); | |
473 | return -ENOMEM; | |
474 | } | |
475 | ||
476 | void dlm_destroy_master_caches(void) | |
477 | { | |
478 | if (dlm_lockname_cache) | |
479 | kmem_cache_destroy(dlm_lockname_cache); | |
480 | ||
481 | if (dlm_lockres_cache) | |
482 | kmem_cache_destroy(dlm_lockres_cache); | |
483 | } | |
484 | ||
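/*
 * dlm_lockres_release() runs when the last reference to a lock resource
 * is dropped.  It removes the resource from the domain's tracking list,
 * complains loudly (and BUGs) if the resource is still hashed or sits on
 * any lock/dirty/recovery/purge list, then frees the name and the
 * resource back to their caches.
 */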
6714d8e8 KH |
485 | static void dlm_lockres_release(struct kref *kref) |
486 | { | |
487 | struct dlm_lock_resource *res; | |
b0d4f817 | 488 | struct dlm_ctxt *dlm; |
6714d8e8 KH |
489 | |
490 | res = container_of(kref, struct dlm_lock_resource, refs); | |
b0d4f817 | 491 | dlm = res->dlm; |
6714d8e8 KH |
492 | |
493 | /* This should not happen -- all lockres' have a name | |
494 | * associated with them at init time. */ | |
495 | BUG_ON(!res->lockname.name); | |
496 | ||
497 | mlog(0, "destroying lockres %.*s\n", res->lockname.len, | |
498 | res->lockname.name); | |
499 | ||
b0d4f817 | 500 | spin_lock(&dlm->track_lock); |
29576f8b SM |
501 | if (!list_empty(&res->tracking)) |
502 | list_del_init(&res->tracking); | |
503 | else { | |
504 | mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n", | |
505 | res->lockname.len, res->lockname.name); | |
506 | dlm_print_one_lock_resource(res); | |
507 | } | |
b0d4f817 SM |
508 | spin_unlock(&dlm->track_lock); |
509 | ||
6800791a SM |
510 | atomic_dec(&dlm->res_cur_count); |
511 | ||
a7f90d83 KH |
512 | if (!hlist_unhashed(&res->hash_node) || |
513 | !list_empty(&res->granted) || | |
514 | !list_empty(&res->converting) || | |
515 | !list_empty(&res->blocked) || | |
516 | !list_empty(&res->dirty) || | |
517 | !list_empty(&res->recovering) || | |
518 | !list_empty(&res->purge)) { | |
519 | mlog(ML_ERROR, | |
520 | "Going to BUG for resource %.*s." | |
521 | " We're on a list! [%c%c%c%c%c%c%c]\n", | |
522 | res->lockname.len, res->lockname.name, | |
523 | !hlist_unhashed(&res->hash_node) ? 'H' : ' ', | |
524 | !list_empty(&res->granted) ? 'G' : ' ', | |
525 | !list_empty(&res->converting) ? 'C' : ' ', | |
526 | !list_empty(&res->blocked) ? 'B' : ' ', | |
527 | !list_empty(&res->dirty) ? 'D' : ' ', | |
528 | !list_empty(&res->recovering) ? 'R' : ' ', | |
529 | !list_empty(&res->purge) ? 'P' : ' '); | |
530 | ||
531 | dlm_print_one_lock_resource(res); | |
532 | } | |
533 | ||
6714d8e8 KH |
534 | /* By the time we're ready to blow this guy away, we shouldn't |
535 | * be on any lists. */ | |
81f2094a | 536 | BUG_ON(!hlist_unhashed(&res->hash_node)); |
6714d8e8 KH |
537 | BUG_ON(!list_empty(&res->granted)); |
538 | BUG_ON(!list_empty(&res->converting)); | |
539 | BUG_ON(!list_empty(&res->blocked)); | |
540 | BUG_ON(!list_empty(&res->dirty)); | |
541 | BUG_ON(!list_empty(&res->recovering)); | |
542 | BUG_ON(!list_empty(&res->purge)); | |
543 | ||
724bdca9 | 544 | kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); |
6714d8e8 | 545 | |
724bdca9 | 546 | kmem_cache_free(dlm_lockres_cache, res); |
6714d8e8 KH |
547 | } |
548 | ||
6714d8e8 KH |
549 | void dlm_lockres_put(struct dlm_lock_resource *res) |
550 | { | |
551 | kref_put(&res->refs, dlm_lockres_release); | |
552 | } | |
553 | ||
554 | static void dlm_init_lockres(struct dlm_ctxt *dlm, | |
555 | struct dlm_lock_resource *res, | |
556 | const char *name, unsigned int namelen) | |
557 | { | |
558 | char *qname; | |
559 | ||
560 | /* If we memset here, we lose our reference to the kmalloc'd | |
561 | * res->lockname.name, so be sure to init every field | |
562 | * correctly! */ | |
563 | ||
564 | qname = (char *) res->lockname.name; | |
565 | memcpy(qname, name, namelen); | |
566 | ||
567 | res->lockname.len = namelen; | |
a3d33291 | 568 | res->lockname.hash = dlm_lockid_hash(name, namelen); |
6714d8e8 KH |
569 | |
570 | init_waitqueue_head(&res->wq); | |
571 | spin_lock_init(&res->spinlock); | |
81f2094a | 572 | INIT_HLIST_NODE(&res->hash_node); |
6714d8e8 KH |
573 | INIT_LIST_HEAD(&res->granted); |
574 | INIT_LIST_HEAD(&res->converting); | |
575 | INIT_LIST_HEAD(&res->blocked); | |
576 | INIT_LIST_HEAD(&res->dirty); | |
577 | INIT_LIST_HEAD(&res->recovering); | |
578 | INIT_LIST_HEAD(&res->purge); | |
29576f8b | 579 | INIT_LIST_HEAD(&res->tracking); |
6714d8e8 KH |
580 | atomic_set(&res->asts_reserved, 0); |
581 | res->migration_pending = 0; | |
ba2bf218 | 582 | res->inflight_locks = 0; |
6714d8e8 | 583 | |
b0d4f817 SM |
584 | res->dlm = dlm; |
585 | ||
6714d8e8 KH |
586 | kref_init(&res->refs); |
587 | ||
6800791a SM |
588 | atomic_inc(&dlm->res_tot_count); |
589 | atomic_inc(&dlm->res_cur_count); | |
590 | ||
6714d8e8 KH |
591 | /* just for consistency */ |
592 | spin_lock(&res->spinlock); | |
593 | dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); | |
594 | spin_unlock(&res->spinlock); | |
595 | ||
596 | res->state = DLM_LOCK_RES_IN_PROGRESS; | |
597 | ||
598 | res->last_used = 0; | |
599 | ||
18c6ac38 | 600 | spin_lock(&dlm->spinlock); |
29576f8b | 601 | list_add_tail(&res->tracking, &dlm->tracking_list); |
18c6ac38 | 602 | spin_unlock(&dlm->spinlock); |
29576f8b | 603 | |
6714d8e8 | 604 | memset(res->lvb, 0, DLM_LVB_LEN); |
ba2bf218 | 605 | memset(res->refmap, 0, sizeof(res->refmap)); |
6714d8e8 KH |
606 | } |
607 | ||
608 | struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, | |
609 | const char *name, | |
610 | unsigned int namelen) | |
611 | { | |
724bdca9 | 612 | struct dlm_lock_resource *res = NULL; |
6714d8e8 | 613 | |
3914ed0c | 614 | res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS); |
6714d8e8 | 615 | if (!res) |
724bdca9 | 616 | goto error; |
6714d8e8 | 617 | |
3914ed0c | 618 | res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); |
724bdca9 SM |
619 | if (!res->lockname.name) |
620 | goto error; | |
6714d8e8 KH |
621 | |
622 | dlm_init_lockres(dlm, res, name, namelen); | |
623 | return res; | |
724bdca9 SM |
624 | |
625 | error: | |
626 | if (res && res->lockname.name) | |
627 | kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); | |
628 | ||
629 | if (res) | |
630 | kmem_cache_free(dlm_lockres_cache, res); | |
631 | return NULL; | |
6714d8e8 KH |
632 | } |
633 | ||
8d400b81 SM |
634 | void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, |
635 | struct dlm_lock_resource *res, int bit) | |
ba2bf218 | 636 | { |
8d400b81 SM |
637 | assert_spin_locked(&res->spinlock); |
638 | ||
639 | mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, | |
640 | res->lockname.name, bit, __builtin_return_address(0)); | |
641 | ||
642 | set_bit(bit, res->refmap); | |
643 | } | |
644 | ||
645 | void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, | |
646 | struct dlm_lock_resource *res, int bit) | |
647 | { | |
648 | assert_spin_locked(&res->spinlock); | |
649 | ||
650 | mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, | |
651 | res->lockname.name, bit, __builtin_return_address(0)); | |
652 | ||
653 | clear_bit(bit, res->refmap); | |
654 | } | |
655 | ||
656 | ||
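/*
 * Inflight references: the first in-flight lock on a resource sets this
 * node's bit in the refmap; dropping the last one clears the bit again
 * and wakes anyone waiting on res->wq.  Both helpers expect
 * res->spinlock to be held.
 */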
657 | void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | |
658 | struct dlm_lock_resource *res) | |
659 | { | |
660 | assert_spin_locked(&res->spinlock); | |
ba2bf218 KH |
661 | |
662 | if (!test_bit(dlm->node_num, res->refmap)) { | |
663 | BUG_ON(res->inflight_locks != 0); | |
8d400b81 | 664 | dlm_lockres_set_refmap_bit(dlm, res, dlm->node_num); |
ba2bf218 KH |
665 | } |
666 | res->inflight_locks++; | |
8d400b81 SM |
667 | mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, |
668 | res->lockname.len, res->lockname.name, res->inflight_locks, | |
669 | __builtin_return_address(0)); | |
ba2bf218 KH |
670 | } |
671 | ||
8d400b81 SM |
672 | void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, |
673 | struct dlm_lock_resource *res) | |
ba2bf218 KH |
674 | { |
675 | assert_spin_locked(&res->spinlock); | |
676 | ||
677 | BUG_ON(res->inflight_locks == 0); | |
8d400b81 | 678 | |
ba2bf218 | 679 | res->inflight_locks--; |
8d400b81 SM |
680 | mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, |
681 | res->lockname.len, res->lockname.name, res->inflight_locks, | |
682 | __builtin_return_address(0)); | |
683 | ||
ba2bf218 | 684 | if (res->inflight_locks == 0) |
8d400b81 | 685 | dlm_lockres_clear_refmap_bit(dlm, res, dlm->node_num); |
ba2bf218 KH |
686 | wake_up(&res->wq); |
687 | } | |
688 | ||
6714d8e8 KH |
689 | /* |
690 | * lookup a lock resource by name. | |
691 | * may already exist in the hashtable. | |
692 | * lockid is null terminated | |
693 | * | |
694 | * if not, allocate enough for the lockres and for | |
695 | * the temporary structure used in doing the mastering. | |
696 | * | |
697 | * also, do a lookup in the dlm->master_list to see | |
698 | * if another node has begun mastering the same lock. | |
699 | * if so, there should be a block entry in there | |
700 | * for this name, and we should *not* attempt to master | |
701 | * the lock here. need to wait around for that node | |
702 | * to assert_master (or die). | |
703 | * | |
704 | */ | |
705 | struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, | |
706 | const char *lockid, | |
3384f3df | 707 | int namelen, |
6714d8e8 KH |
708 | int flags) |
709 | { | |
710 | struct dlm_lock_resource *tmpres=NULL, *res=NULL; | |
711 | struct dlm_master_list_entry *mle = NULL; | |
712 | struct dlm_master_list_entry *alloc_mle = NULL; | |
713 | int blocked = 0; | |
714 | int ret, nodenum; | |
715 | struct dlm_node_iter iter; | |
3384f3df | 716 | unsigned int hash; |
6714d8e8 | 717 | int tries = 0; |
c03872f5 | 718 | int bit, wait_on_recovery = 0; |
ba2bf218 | 719 | int drop_inflight_if_nonlocal = 0; |
6714d8e8 KH |
720 | |
721 | BUG_ON(!lockid); | |
722 | ||
a3d33291 | 723 | hash = dlm_lockid_hash(lockid, namelen); |
6714d8e8 KH |
724 | |
725 | mlog(0, "get lockres %s (len %d)\n", lockid, namelen); | |
726 | ||
727 | lookup: | |
728 | spin_lock(&dlm->spinlock); | |
ba2bf218 | 729 | tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); |
6714d8e8 | 730 | if (tmpres) { |
ba2bf218 KH |
731 | int dropping_ref = 0; |
732 | ||
7b791d68 SM |
733 | spin_unlock(&dlm->spinlock); |
734 | ||
ba2bf218 | 735 | spin_lock(&tmpres->spinlock); |
7b791d68 SM |
736 | /* We wait for the other thread that is mastering the resource */ |
737 | if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | |
738 | __dlm_wait_on_lockres(tmpres); | |
739 | BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); | |
740 | } | |
741 | ||
ba2bf218 KH |
742 | if (tmpres->owner == dlm->node_num) { |
743 | BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); | |
744 | dlm_lockres_grab_inflight_ref(dlm, tmpres); | |
745 | } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) | |
746 | dropping_ref = 1; | |
747 | spin_unlock(&tmpres->spinlock); | |
ba2bf218 KH |
748 | |
749 | /* wait until done messaging the master, drop our ref to allow | |
750 | * the lockres to be purged, start over. */ | |
751 | if (dropping_ref) { | |
752 | spin_lock(&tmpres->spinlock); | |
753 | __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF); | |
754 | spin_unlock(&tmpres->spinlock); | |
755 | dlm_lockres_put(tmpres); | |
756 | tmpres = NULL; | |
757 | goto lookup; | |
758 | } | |
759 | ||
6714d8e8 KH |
760 | mlog(0, "found in hash!\n"); |
761 | if (res) | |
762 | dlm_lockres_put(res); | |
763 | res = tmpres; | |
764 | goto leave; | |
765 | } | |
766 | ||
767 | if (!res) { | |
768 | spin_unlock(&dlm->spinlock); | |
769 | mlog(0, "allocating a new resource\n"); | |
770 | /* nothing found and we need to allocate one. */ | |
3914ed0c | 771 | alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); |
6714d8e8 KH |
772 | if (!alloc_mle) |
773 | goto leave; | |
774 | res = dlm_new_lockres(dlm, lockid, namelen); | |
775 | if (!res) | |
776 | goto leave; | |
777 | goto lookup; | |
778 | } | |
779 | ||
780 | mlog(0, "no lockres found, allocated our own: %p\n", res); | |
781 | ||
782 | if (flags & LKM_LOCAL) { | |
783 | /* caller knows it's safe to assume it's not mastered elsewhere | |
784 | * DONE! return right away */ | |
785 | spin_lock(&res->spinlock); | |
786 | dlm_change_lockres_owner(dlm, res, dlm->node_num); | |
787 | __dlm_insert_lockres(dlm, res); | |
ba2bf218 | 788 | dlm_lockres_grab_inflight_ref(dlm, res); |
6714d8e8 KH |
789 | spin_unlock(&res->spinlock); |
790 | spin_unlock(&dlm->spinlock); | |
791 | /* lockres still marked IN_PROGRESS */ | |
792 | goto wake_waiters; | |
793 | } | |
794 | ||
795 | /* check master list to see if another node has started mastering it */ | |
796 | spin_lock(&dlm->master_lock); | |
797 | ||
798 | /* if we found a block, wait for lock to be mastered by another node */ | |
799 | blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); | |
800 | if (blocked) { | |
ba2bf218 | 801 | int mig; |
6714d8e8 KH |
802 | if (mle->type == DLM_MLE_MASTER) { |
803 | mlog(ML_ERROR, "master entry for nonexistent lock!\n"); | |
804 | BUG(); | |
ba2bf218 KH |
805 | } |
806 | mig = (mle->type == DLM_MLE_MIGRATION); | |
807 | /* if there is a migration in progress, let the migration | |
808 | * finish before continuing. we can wait for the absence | |
809 | * of the MIGRATION mle: either the migrate finished or | |
810 | * one of the nodes died and the mle was cleaned up. | |
811 | * if there is a BLOCK here, but it already has a master | |
812 | * set, we are too late. the master does not have a ref | |
813 | * for us in the refmap. detach the mle and drop it. | |
814 | * either way, go back to the top and start over. */ | |
815 | if (mig || mle->master != O2NM_MAX_NODES) { | |
816 | BUG_ON(mig && mle->master == dlm->node_num); | |
817 | /* we arrived too late. the master does not | |
818 | * have a ref for us. retry. */ | |
819 | mlog(0, "%s:%.*s: late on %s\n", | |
820 | dlm->name, namelen, lockid, | |
821 | mig ? "MIGRATION" : "BLOCK"); | |
6714d8e8 | 822 | spin_unlock(&dlm->master_lock); |
6714d8e8 KH |
823 | spin_unlock(&dlm->spinlock); |
824 | ||
825 | /* master is known, detach */ | |
ba2bf218 KH |
826 | if (!mig) |
827 | dlm_mle_detach_hb_events(dlm, mle); | |
6714d8e8 KH |
828 | dlm_put_mle(mle); |
829 | mle = NULL; | |
25985edc | 830 | /* this is lame, but we can't wait on either |
ba2bf218 KH |
831 | * the mle or lockres waitqueue here */ |
832 | if (mig) | |
833 | msleep(100); | |
834 | goto lookup; | |
6714d8e8 KH |
835 | } |
836 | } else { | |
837 | /* go ahead and try to master lock on this node */ | |
838 | mle = alloc_mle; | |
839 | /* make sure this does not get freed below */ | |
840 | alloc_mle = NULL; | |
841 | dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); | |
842 | set_bit(dlm->node_num, mle->maybe_map); | |
1c084577 | 843 | __dlm_insert_mle(dlm, mle); |
c03872f5 KH |
844 | |
845 | /* still holding the dlm spinlock, check the recovery map | |
2bd63216 | 846 | * to see if there are any nodes that still need to be |
c03872f5 KH |
847 | * considered. these will not appear in the mle nodemap |
848 | * but they might own this lockres. wait on them. */ | |
849 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); | |
850 | if (bit < O2NM_MAX_NODES) { | |
8decab3c SM |
851 | mlog(0, "%s: res %.*s, At least one node (%d) " |
852 | "to recover before lock mastery can begin\n", | |
c03872f5 KH |
853 | dlm->name, namelen, (char *)lockid, bit); |
854 | wait_on_recovery = 1; | |
855 | } | |
6714d8e8 KH |
856 | } |
857 | ||
858 | /* at this point there is either a DLM_MLE_BLOCK or a | |
859 | * DLM_MLE_MASTER on the master list, so it's safe to add the | |
860 | * lockres to the hashtable. anyone who finds the lock will | |
861 | * still have to wait on the IN_PROGRESS. */ | |
862 | ||
863 | /* finally add the lockres to its hash bucket */ | |
864 | __dlm_insert_lockres(dlm, res); | |
8d400b81 SM |
865 | |
866 | spin_lock(&res->spinlock); | |
867 | dlm_lockres_grab_inflight_ref(dlm, res); | |
868 | spin_unlock(&res->spinlock); | |
ba2bf218 KH |
869 | |
870 | /* if this node does not become the master make sure to drop | |
871 | * this inflight reference below */ | |
872 | drop_inflight_if_nonlocal = 1; | |
873 | ||
6714d8e8 KH |
874 | /* get an extra ref on the mle in case this is a BLOCK |
875 | * if so, the creator of the BLOCK may try to put the last | |
876 | * ref at this time in the assert master handler, so we | |
877 | * need an extra one to keep from a bad ptr deref. */ | |
a2bf0477 | 878 | dlm_get_mle_inuse(mle); |
6714d8e8 KH |
879 | spin_unlock(&dlm->master_lock); |
880 | spin_unlock(&dlm->spinlock); | |
881 | ||
e7e69eb3 | 882 | redo_request: |
c03872f5 KH |
883 | while (wait_on_recovery) { |
884 | /* any cluster changes that occurred after dropping the | |
885 | * dlm spinlock would be detectable be a change on the mle, | |
886 | * so we only need to clear out the recovery map once. */ | |
887 | if (dlm_is_recovery_lock(lockid, namelen)) { | |
8decab3c SM |
888 | mlog(0, "%s: Recovery map is not empty, but must " |
889 | "master $RECOVERY lock now\n", dlm->name); | |
c03872f5 KH |
890 | if (!dlm_pre_master_reco_lockres(dlm, res)) |
891 | wait_on_recovery = 0; | |
892 | else { | |
893 | mlog(0, "%s: waiting 500ms for heartbeat state " | |
894 | "change\n", dlm->name); | |
895 | msleep(500); | |
896 | } | |
897 | continue; | |
2bd63216 | 898 | } |
c03872f5 KH |
899 | |
900 | dlm_kick_recovery_thread(dlm); | |
aa087b84 | 901 | msleep(1000); |
c03872f5 KH |
902 | dlm_wait_for_recovery(dlm); |
903 | ||
904 | spin_lock(&dlm->spinlock); | |
905 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); | |
906 | if (bit < O2NM_MAX_NODES) { | |
8decab3c SM |
907 | mlog(0, "%s: res %.*s, At least one node (%d) " |
908 | "to recover before lock mastery can begin\n", | |
c03872f5 KH |
909 | dlm->name, namelen, (char *)lockid, bit); |
910 | wait_on_recovery = 1; | |
911 | } else | |
912 | wait_on_recovery = 0; | |
913 | spin_unlock(&dlm->spinlock); | |
b7084ab5 KH |
914 | |
915 | if (wait_on_recovery) | |
916 | dlm_wait_for_node_recovery(dlm, bit, 10000); | |
c03872f5 KH |
917 | } |
918 | ||
6714d8e8 KH |
919 | /* must wait for lock to be mastered elsewhere */ |
920 | if (blocked) | |
921 | goto wait; | |
922 | ||
6714d8e8 KH |
923 | ret = -EINVAL; |
924 | dlm_node_iter_init(mle->vote_map, &iter); | |
925 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | |
ba2bf218 | 926 | ret = dlm_do_master_request(res, mle, nodenum); |
6714d8e8 KH |
927 | if (ret < 0) |
928 | mlog_errno(ret); | |
929 | if (mle->master != O2NM_MAX_NODES) { | |
930 | /* found a master ! */ | |
9c6510a5 KH |
931 | if (mle->master <= nodenum) |
932 | break; | |
933 | /* if our master request has not reached the master | |
934 | * yet, keep going until it does. this is how the | |
935 | * master will know that asserts are needed back to | |
936 | * the lower nodes. */ | |
8decab3c SM |
937 | mlog(0, "%s: res %.*s, Requests only up to %u but " |
938 | "master is %u, keep going\n", dlm->name, namelen, | |
9c6510a5 | 939 | lockid, nodenum, mle->master); |
6714d8e8 KH |
940 | } |
941 | } | |
942 | ||
943 | wait: | |
944 | /* keep going until the response map includes all nodes */ | |
945 | ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); | |
946 | if (ret < 0) { | |
e7e69eb3 | 947 | wait_on_recovery = 1; |
8decab3c SM |
948 | mlog(0, "%s: res %.*s, Node map changed, redo the master " |
949 | "request now, blocked=%d\n", dlm->name, res->lockname.len, | |
6714d8e8 KH |
950 | res->lockname.name, blocked); |
951 | if (++tries > 20) { | |
8decab3c SM |
952 | mlog(ML_ERROR, "%s: res %.*s, Spinning on " |
953 | "dlm_wait_for_lock_mastery, blocked = %d\n", | |
2bd63216 | 954 | dlm->name, res->lockname.len, |
6714d8e8 KH |
955 | res->lockname.name, blocked); |
956 | dlm_print_one_lock_resource(res); | |
8a9343fa | 957 | dlm_print_one_mle(mle); |
6714d8e8 KH |
958 | tries = 0; |
959 | } | |
960 | goto redo_request; | |
961 | } | |
962 | ||
8decab3c SM |
963 | mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, |
964 | res->lockname.name, res->owner); | |
6714d8e8 KH |
965 | /* make sure we never continue without this */ |
966 | BUG_ON(res->owner == O2NM_MAX_NODES); | |
967 | ||
968 | /* master is known, detach if not already detached */ | |
969 | dlm_mle_detach_hb_events(dlm, mle); | |
970 | dlm_put_mle(mle); | |
971 | /* put the extra ref */ | |
a2bf0477 | 972 | dlm_put_mle_inuse(mle); |
6714d8e8 KH |
973 | |
974 | wake_waiters: | |
975 | spin_lock(&res->spinlock); | |
ba2bf218 KH |
976 | if (res->owner != dlm->node_num && drop_inflight_if_nonlocal) |
977 | dlm_lockres_drop_inflight_ref(dlm, res); | |
6714d8e8 KH |
978 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; |
979 | spin_unlock(&res->spinlock); | |
980 | wake_up(&res->wq); | |
981 | ||
982 | leave: | |
983 | /* need to free the unused mle */ | |
984 | if (alloc_mle) | |
985 | kmem_cache_free(dlm_mle_cache, alloc_mle); | |
986 | ||
987 | return res; | |
988 | } | |
989 | ||
990 | ||
991 | #define DLM_MASTERY_TIMEOUT_MS 5000 | |
992 | ||
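/*
 * dlm_wait_for_lock_mastery() loops until the resource has an owner.
 * Each pass re-checks res->owner, restarts mastery if the node map
 * changed, and otherwise sleeps up to DLM_MASTERY_TIMEOUT_MS waiting to
 * be woken.  Once all votes are in and this node holds the lowest bit
 * in maybe_map, it claims mastership and sends the assert itself.
 */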
993 | static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, | |
994 | struct dlm_lock_resource *res, | |
995 | struct dlm_master_list_entry *mle, | |
996 | int *blocked) | |
997 | { | |
998 | u8 m; | |
999 | int ret, bit; | |
1000 | int map_changed, voting_done; | |
1001 | int assert, sleep; | |
1002 | ||
1003 | recheck: | |
1004 | ret = 0; | |
1005 | assert = 0; | |
1006 | ||
1007 | /* check if another node has already become the owner */ | |
1008 | spin_lock(&res->spinlock); | |
1009 | if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | |
9c6510a5 KH |
1010 | mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, |
1011 | res->lockname.len, res->lockname.name, res->owner); | |
6714d8e8 | 1012 | spin_unlock(&res->spinlock); |
9c6510a5 KH |
1013 | /* this will cause the master to re-assert across |
1014 | * the whole cluster, freeing up mles */ | |
588e0090 | 1015 | if (res->owner != dlm->node_num) { |
ba2bf218 | 1016 | ret = dlm_do_master_request(res, mle, res->owner); |
588e0090 KH |
1017 | if (ret < 0) { |
1018 | /* give recovery a chance to run */ | |
1019 | mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); | |
1020 | msleep(500); | |
1021 | goto recheck; | |
1022 | } | |
9c6510a5 KH |
1023 | } |
1024 | ret = 0; | |
6714d8e8 KH |
1025 | goto leave; |
1026 | } | |
1027 | spin_unlock(&res->spinlock); | |
1028 | ||
1029 | spin_lock(&mle->spinlock); | |
1030 | m = mle->master; | |
1031 | map_changed = (memcmp(mle->vote_map, mle->node_map, | |
1032 | sizeof(mle->vote_map)) != 0); | |
1033 | voting_done = (memcmp(mle->vote_map, mle->response_map, | |
1034 | sizeof(mle->vote_map)) == 0); | |
1035 | ||
1036 | /* restart if we hit any errors */ | |
1037 | if (map_changed) { | |
1038 | int b; | |
1039 | mlog(0, "%s: %.*s: node map changed, restarting\n", | |
1040 | dlm->name, res->lockname.len, res->lockname.name); | |
1041 | ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); | |
1042 | b = (mle->type == DLM_MLE_BLOCK); | |
1043 | if ((*blocked && !b) || (!*blocked && b)) { | |
2bd63216 | 1044 | mlog(0, "%s:%.*s: status change: old=%d new=%d\n", |
6714d8e8 KH |
1045 | dlm->name, res->lockname.len, res->lockname.name, |
1046 | *blocked, b); | |
1047 | *blocked = b; | |
1048 | } | |
1049 | spin_unlock(&mle->spinlock); | |
1050 | if (ret < 0) { | |
1051 | mlog_errno(ret); | |
1052 | goto leave; | |
1053 | } | |
1054 | mlog(0, "%s:%.*s: restart lock mastery succeeded, " | |
1055 | "rechecking now\n", dlm->name, res->lockname.len, | |
1056 | res->lockname.name); | |
1057 | goto recheck; | |
aa852354 KH |
1058 | } else { |
1059 | if (!voting_done) { | |
1060 | mlog(0, "map not changed and voting not done " | |
1061 | "for %s:%.*s\n", dlm->name, res->lockname.len, | |
1062 | res->lockname.name); | |
1063 | } | |
6714d8e8 KH |
1064 | } |
1065 | ||
1066 | if (m != O2NM_MAX_NODES) { | |
1067 | /* another node has done an assert! | |
1068 | * all done! */ | |
1069 | sleep = 0; | |
1070 | } else { | |
1071 | sleep = 1; | |
1072 | /* have all nodes responded? */ | |
1073 | if (voting_done && !*blocked) { | |
1074 | bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); | |
1075 | if (dlm->node_num <= bit) { | |
1076 | /* my node number is lowest. | |
1077 | * now tell other nodes that I am | |
1078 | * mastering this. */ | |
1079 | mle->master = dlm->node_num; | |
ba2bf218 KH |
1080 | /* ref was grabbed in get_lock_resource |
1081 | * will be dropped in dlmlock_master */ | |
6714d8e8 KH |
1082 | assert = 1; |
1083 | sleep = 0; | |
1084 | } | |
1085 | /* if voting is done, but we have not received | |
1086 | * an assert master yet, we must sleep */ | |
1087 | } | |
1088 | } | |
1089 | ||
1090 | spin_unlock(&mle->spinlock); | |
1091 | ||
1092 | /* sleep if we haven't finished voting yet */ | |
1093 | if (sleep) { | |
1094 | unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); | |
1095 | ||
1096 | /* | |
1097 | if (atomic_read(&mle->mle_refs.refcount) < 2) | |
1098 | mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, | |
1099 | atomic_read(&mle->mle_refs.refcount), | |
1100 | res->lockname.len, res->lockname.name); | |
1101 | */ | |
1102 | atomic_set(&mle->woken, 0); | |
1103 | (void)wait_event_timeout(mle->wq, | |
1104 | (atomic_read(&mle->woken) == 1), | |
1105 | timeo); | |
1106 | if (res->owner == O2NM_MAX_NODES) { | |
ba2bf218 KH |
1107 | mlog(0, "%s:%.*s: waiting again\n", dlm->name, |
1108 | res->lockname.len, res->lockname.name); | |
6714d8e8 KH |
1109 | goto recheck; |
1110 | } | |
1111 | mlog(0, "done waiting, master is %u\n", res->owner); | |
1112 | ret = 0; | |
1113 | goto leave; | |
1114 | } | |
1115 | ||
1116 | ret = 0; /* done */ | |
1117 | if (assert) { | |
1118 | m = dlm->node_num; | |
1119 | mlog(0, "about to master %.*s here, this=%u\n", | |
1120 | res->lockname.len, res->lockname.name, m); | |
ba2bf218 | 1121 | ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); |
6714d8e8 KH |
1122 | if (ret) { |
1123 | /* This is a failure in the network path, | |
1124 | * not in the response to the assert_master | |
1125 | * (any nonzero response is a BUG on this node). | |
1126 | * Most likely a socket just got disconnected | |
1127 | * due to node death. */ | |
1128 | mlog_errno(ret); | |
1129 | } | |
1130 | /* no longer need to restart lock mastery. | |
1131 | * all living nodes have been contacted. */ | |
1132 | ret = 0; | |
1133 | } | |
1134 | ||
1135 | /* set the lockres owner */ | |
1136 | spin_lock(&res->spinlock); | |
ba2bf218 KH |
1137 | /* mastery reference obtained either during |
1138 | * assert_master_handler or in get_lock_resource */ | |
6714d8e8 KH |
1139 | dlm_change_lockres_owner(dlm, res, m); |
1140 | spin_unlock(&res->spinlock); | |
1141 | ||
1142 | leave: | |
1143 | return ret; | |
1144 | } | |
1145 | ||
1146 | struct dlm_bitmap_diff_iter | |
1147 | { | |
1148 | int curnode; | |
1149 | unsigned long *orig_bm; | |
1150 | unsigned long *cur_bm; | |
1151 | unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; | |
1152 | }; | |
1153 | ||
1154 | enum dlm_node_state_change | |
1155 | { | |
1156 | NODE_DOWN = -1, | |
1157 | NODE_NO_CHANGE = 0, | |
1158 | NODE_UP | |
1159 | }; | |
1160 | ||
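/*
 * The bitmap diff iterator XORs the original and current node bitmaps
 * and walks the bits that differ.  A bit that was set in the original
 * map reports NODE_DOWN; one newly set in the current map reports
 * NODE_UP.  -ENOENT ends the iteration.
 */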
1161 | static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, | |
1162 | unsigned long *orig_bm, | |
1163 | unsigned long *cur_bm) | |
1164 | { | |
1165 | unsigned long p1, p2; | |
1166 | int i; | |
1167 | ||
1168 | iter->curnode = -1; | |
1169 | iter->orig_bm = orig_bm; | |
1170 | iter->cur_bm = cur_bm; | |
1171 | ||
1172 | for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) { | |
1173 | p1 = *(iter->orig_bm + i); | |
1174 | p2 = *(iter->cur_bm + i); | |
1175 | iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); | |
1176 | } | |
1177 | } | |
1178 | ||
1179 | static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, | |
1180 | enum dlm_node_state_change *state) | |
1181 | { | |
1182 | int bit; | |
1183 | ||
1184 | if (iter->curnode >= O2NM_MAX_NODES) | |
1185 | return -ENOENT; | |
1186 | ||
1187 | bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, | |
1188 | iter->curnode+1); | |
1189 | if (bit >= O2NM_MAX_NODES) { | |
1190 | iter->curnode = O2NM_MAX_NODES; | |
1191 | return -ENOENT; | |
1192 | } | |
1193 | ||
1194 | /* if it was there in the original then this node died */ | |
1195 | if (test_bit(bit, iter->orig_bm)) | |
1196 | *state = NODE_DOWN; | |
1197 | else | |
1198 | *state = NODE_UP; | |
1199 | ||
1200 | iter->curnode = bit; | |
1201 | return bit; | |
1202 | } | |
1203 | ||
1204 | ||
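/*
 * dlm_restart_lock_mastery() is called with mle->spinlock held after the
 * node map changed.  A node that came up has its old response cleared
 * and is added to the vote map; a node that died is dropped from
 * maybe_map (possibly converting a blocked mle back to DLM_MLE_MASTER)
 * and the response/vote maps are reset as if no one had been contacted.
 * Any change makes the function return -EAGAIN so mastery is restarted.
 */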
1205 | static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, | |
1206 | struct dlm_lock_resource *res, | |
1207 | struct dlm_master_list_entry *mle, | |
1208 | int blocked) | |
1209 | { | |
1210 | struct dlm_bitmap_diff_iter bdi; | |
1211 | enum dlm_node_state_change sc; | |
1212 | int node; | |
1213 | int ret = 0; | |
1214 | ||
1215 | mlog(0, "something happened such that the " | |
1216 | "master process may need to be restarted!\n"); | |
1217 | ||
1218 | assert_spin_locked(&mle->spinlock); | |
1219 | ||
1220 | dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); | |
1221 | node = dlm_bitmap_diff_iter_next(&bdi, &sc); | |
1222 | while (node >= 0) { | |
1223 | if (sc == NODE_UP) { | |
e2faea4c KH |
1224 | /* a node came up. clear any old vote from |
1225 | * the response map and set it in the vote map | |
1226 | * then restart the mastery. */ | |
1227 | mlog(ML_NOTICE, "node %d up while restarting\n", node); | |
6714d8e8 KH |
1228 | |
1229 | /* redo the master request, but only for the new node */ | |
1230 | mlog(0, "sending request to new node\n"); | |
1231 | clear_bit(node, mle->response_map); | |
1232 | set_bit(node, mle->vote_map); | |
1233 | } else { | |
1234 | mlog(ML_ERROR, "node down! %d\n", node); | |
6714d8e8 KH |
1235 | if (blocked) { |
1236 | int lowest = find_next_bit(mle->maybe_map, | |
1237 | O2NM_MAX_NODES, 0); | |
1238 | ||
1239 | /* act like it was never there */ | |
1240 | clear_bit(node, mle->maybe_map); | |
1241 | ||
e7e69eb3 KH |
1242 | if (node == lowest) { |
1243 | mlog(0, "expected master %u died" | |
1244 | " while this node was blocked " | |
1245 | "waiting on it!\n", node); | |
1246 | lowest = find_next_bit(mle->maybe_map, | |
1247 | O2NM_MAX_NODES, | |
1248 | lowest+1); | |
1249 | if (lowest < O2NM_MAX_NODES) { | |
1250 | mlog(0, "%s:%.*s:still " | |
1251 | "blocked. waiting on %u " | |
1252 | "now\n", dlm->name, | |
1253 | res->lockname.len, | |
1254 | res->lockname.name, | |
1255 | lowest); | |
1256 | } else { | |
1257 | /* mle is an MLE_BLOCK, but | |
1258 | * there is now nothing left to | |
1259 | * block on. we need to return | |
1260 | * all the way back out and try | |
1261 | * again with an MLE_MASTER. | |
1262 | * dlm_do_local_recovery_cleanup | |
1263 | * has already run, so the mle | |
1264 | * refcount is ok */ | |
1265 | mlog(0, "%s:%.*s: no " | |
1266 | "longer blocking. try to " | |
1267 | "master this here\n", | |
1268 | dlm->name, | |
1269 | res->lockname.len, | |
1270 | res->lockname.name); | |
1271 | mle->type = DLM_MLE_MASTER; | |
7141514b | 1272 | mle->mleres = res; |
e7e69eb3 | 1273 | } |
6714d8e8 | 1274 | } |
6714d8e8 KH |
1275 | } |
1276 | ||
e7e69eb3 KH |
1277 | /* now blank out everything, as if we had never |
1278 | * contacted anyone */ | |
1279 | memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); | |
1280 | memset(mle->response_map, 0, sizeof(mle->response_map)); | |
1281 | /* reset the vote_map to the current node_map */ | |
1282 | memcpy(mle->vote_map, mle->node_map, | |
1283 | sizeof(mle->node_map)); | |
1284 | /* put myself into the maybe map */ | |
1285 | if (mle->type != DLM_MLE_BLOCK) | |
1286 | set_bit(dlm->node_num, mle->maybe_map); | |
6714d8e8 KH |
1287 | } |
1288 | ret = -EAGAIN; | |
6714d8e8 KH |
1289 | node = dlm_bitmap_diff_iter_next(&bdi, &sc); |
1290 | } | |
1291 | return ret; | |
1292 | } | |
1293 | ||
1294 | ||
1295 | /* | |
1296 | * DLM_MASTER_REQUEST_MSG | |
1297 | * | |
1298 | * returns: 0 on success, | |
1299 | * -errno on a network error | |
1300 | * | |
1301 | * on error, the caller should assume the target node is "dead" | |
1302 | * | |
1303 | */ | |
1304 | ||
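/*
 * The response determines what happens next: YES records the sender as
 * master, NO and MAYBE just update the response (and maybe_map) bits,
 * and ERROR causes the request to be resent after a short sleep.
 * -ENOMEM from o2net is retried; host-down errors are reported back to
 * the caller, and anything else is a BUG.
 */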
ba2bf218 KH |
1305 | static int dlm_do_master_request(struct dlm_lock_resource *res, |
1306 | struct dlm_master_list_entry *mle, int to) | |
6714d8e8 KH |
1307 | { |
1308 | struct dlm_ctxt *dlm = mle->dlm; | |
1309 | struct dlm_master_request request; | |
1310 | int ret, response=0, resend; | |
1311 | ||
1312 | memset(&request, 0, sizeof(request)); | |
1313 | request.node_idx = dlm->node_num; | |
1314 | ||
1315 | BUG_ON(mle->type == DLM_MLE_MIGRATION); | |
1316 | ||
7141514b SM |
1317 | request.namelen = (u8)mle->mnamelen; |
1318 | memcpy(request.name, mle->mname, request.namelen); | |
6714d8e8 KH |
1319 | |
1320 | again: | |
1321 | ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, | |
1322 | sizeof(request), to, &response); | |
1323 | if (ret < 0) { | |
1324 | if (ret == -ESRCH) { | |
1325 | /* should never happen */ | |
1326 | mlog(ML_ERROR, "TCP stack not ready!\n"); | |
1327 | BUG(); | |
1328 | } else if (ret == -EINVAL) { | |
1329 | mlog(ML_ERROR, "bad args passed to o2net!\n"); | |
1330 | BUG(); | |
1331 | } else if (ret == -ENOMEM) { | |
1332 | mlog(ML_ERROR, "out of memory while trying to send " | |
1333 | "network message! retrying\n"); | |
1334 | /* this is totally crude */ | |
1335 | msleep(50); | |
1336 | goto again; | |
1337 | } else if (!dlm_is_host_down(ret)) { | |
1338 | /* not a network error. bad. */ | |
1339 | mlog_errno(ret); | |
1340 | mlog(ML_ERROR, "unhandled error!"); | |
1341 | BUG(); | |
1342 | } | |
1343 | /* all other errors should be network errors, | |
1344 | * and likely indicate node death */ | |
1345 | mlog(ML_ERROR, "link to %d went down!\n", to); | |
1346 | goto out; | |
1347 | } | |
1348 | ||
1349 | ret = 0; | |
1350 | resend = 0; | |
1351 | spin_lock(&mle->spinlock); | |
1352 | switch (response) { | |
1353 | case DLM_MASTER_RESP_YES: | |
1354 | set_bit(to, mle->response_map); | |
1355 | mlog(0, "node %u is the master, response=YES\n", to); | |
ba2bf218 KH |
1356 | mlog(0, "%s:%.*s: master node %u now knows I have a " |
1357 | "reference\n", dlm->name, res->lockname.len, | |
1358 | res->lockname.name, to); | |
6714d8e8 KH |
1359 | mle->master = to; |
1360 | break; | |
1361 | case DLM_MASTER_RESP_NO: | |
1362 | mlog(0, "node %u not master, response=NO\n", to); | |
1363 | set_bit(to, mle->response_map); | |
1364 | break; | |
1365 | case DLM_MASTER_RESP_MAYBE: | |
1366 | mlog(0, "node %u not master, response=MAYBE\n", to); | |
1367 | set_bit(to, mle->response_map); | |
1368 | set_bit(to, mle->maybe_map); | |
1369 | break; | |
1370 | case DLM_MASTER_RESP_ERROR: | |
1371 | mlog(0, "node %u hit an error, resending\n", to); | |
1372 | resend = 1; | |
1373 | response = 0; | |
1374 | break; | |
1375 | default: | |
1376 | mlog(ML_ERROR, "bad response! %u\n", response); | |
1377 | BUG(); | |
1378 | } | |
1379 | spin_unlock(&mle->spinlock); | |
1380 | if (resend) { | |
1381 | /* this is also totally crude */ | |
1382 | msleep(50); | |
1383 | goto again; | |
1384 | } | |
1385 | ||
1386 | out: | |
1387 | return ret; | |
1388 | } | |
1389 | ||
1390 | /* | |
1391 | * locks that can be taken here: | |
1392 | * dlm->spinlock | |
1393 | * res->spinlock | |
1394 | * mle->spinlock | |
1395 | * dlm->master_list | |
1396 | * | |
1397 | * if possible, TRIM THIS DOWN!!! | |
1398 | */ | |
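/*
 * In short: if this node owns the lockres the answer is YES and an
 * assert is dispatched to clean up mles on the other nodes; if another
 * node owns it, NO.  With no owner yet the mle decides: NO for a BLOCK
 * or MIGRATION entry, YES if this node is already recorded as master,
 * MAYBE otherwise.  If neither the lockres nor an mle exists here, a
 * BLOCK mle is inserted (allocating one and retrying the lookup) and
 * the answer is NO.
 */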
d74c9803 KH |
1399 | int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, |
1400 | void **ret_data) | |
6714d8e8 KH |
1401 | { |
1402 | u8 response = DLM_MASTER_RESP_MAYBE; | |
1403 | struct dlm_ctxt *dlm = data; | |
9c6510a5 | 1404 | struct dlm_lock_resource *res = NULL; |
6714d8e8 KH |
1405 | struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; |
1406 | struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; | |
1407 | char *name; | |
a3d33291 | 1408 | unsigned int namelen, hash; |
6714d8e8 KH |
1409 | int found, ret; |
1410 | int set_maybe; | |
9c6510a5 | 1411 | int dispatch_assert = 0; |
6714d8e8 KH |
1412 | |
1413 | if (!dlm_grab(dlm)) | |
1414 | return DLM_MASTER_RESP_NO; | |
1415 | ||
1416 | if (!dlm_domain_fully_joined(dlm)) { | |
1417 | response = DLM_MASTER_RESP_NO; | |
1418 | goto send_response; | |
1419 | } | |
1420 | ||
1421 | name = request->name; | |
1422 | namelen = request->namelen; | |
a3d33291 | 1423 | hash = dlm_lockid_hash(name, namelen); |
6714d8e8 KH |
1424 | |
1425 | if (namelen > DLM_LOCKID_NAME_MAX) { | |
1426 | response = DLM_IVBUFLEN; | |
1427 | goto send_response; | |
1428 | } | |
1429 | ||
1430 | way_up_top: | |
1431 | spin_lock(&dlm->spinlock); | |
a3d33291 | 1432 | res = __dlm_lookup_lockres(dlm, name, namelen, hash); |
6714d8e8 KH |
1433 | if (res) { |
1434 | spin_unlock(&dlm->spinlock); | |
1435 | ||
1436 | /* take care of the easy cases up front */ | |
1437 | spin_lock(&res->spinlock); | |
1cd04dbe KH |
1438 | if (res->state & (DLM_LOCK_RES_RECOVERING| |
1439 | DLM_LOCK_RES_MIGRATING)) { | |
6714d8e8 KH |
1440 | spin_unlock(&res->spinlock); |
1441 | mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " | |
1cd04dbe | 1442 | "being recovered/migrated\n"); |
6714d8e8 KH |
1443 | response = DLM_MASTER_RESP_ERROR; |
1444 | if (mle) | |
1445 | kmem_cache_free(dlm_mle_cache, mle); | |
1446 | goto send_response; | |
1447 | } | |
1448 | ||
1449 | if (res->owner == dlm->node_num) { | |
8d400b81 | 1450 | dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); |
6714d8e8 | 1451 | spin_unlock(&res->spinlock); |
6714d8e8 KH |
1452 | response = DLM_MASTER_RESP_YES; |
1453 | if (mle) | |
1454 | kmem_cache_free(dlm_mle_cache, mle); | |
1455 | ||
1456 | /* this node is the owner. | |
1457 | * there is some extra work that needs to | |
1458 | * happen now. the requesting node has | |
1459 | * caused all nodes up to this one to | |
1460 | * create mles. this node now needs to | |
1461 | * go back and clean those up. */ | |
9c6510a5 | 1462 | dispatch_assert = 1; |
6714d8e8 KH |
1463 | goto send_response; |
1464 | } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | |
1465 | spin_unlock(&res->spinlock); | |
1466 | // mlog(0, "node %u is the master\n", res->owner); | |
1467 | response = DLM_MASTER_RESP_NO; | |
1468 | if (mle) | |
1469 | kmem_cache_free(dlm_mle_cache, mle); | |
1470 | goto send_response; | |
1471 | } | |
1472 | ||
1473 | /* ok, there is no owner. either this node is | |
1474 | * being blocked, or it is actively trying to | |
1475 | * master this lock. */ | |
1476 | if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { | |
1477 | mlog(ML_ERROR, "lock with no owner should be " | |
1478 | "in-progress!\n"); | |
1479 | BUG(); | |
1480 | } | |
1481 | ||
1482 | // mlog(0, "lockres is in progress...\n"); | |
1483 | spin_lock(&dlm->master_lock); | |
1484 | found = dlm_find_mle(dlm, &tmpmle, name, namelen); | |
1485 | if (!found) { | |
1486 | mlog(ML_ERROR, "no mle found for this lock!\n"); | |
1487 | BUG(); | |
1488 | } | |
1489 | set_maybe = 1; | |
1490 | spin_lock(&tmpmle->spinlock); | |
1491 | if (tmpmle->type == DLM_MLE_BLOCK) { | |
1492 | // mlog(0, "this node is waiting for " | |
1493 | // "lockres to be mastered\n"); | |
1494 | response = DLM_MASTER_RESP_NO; | |
1495 | } else if (tmpmle->type == DLM_MLE_MIGRATION) { | |
1496 | mlog(0, "node %u is master, but trying to migrate to " | |
1497 | "node %u.\n", tmpmle->master, tmpmle->new_master); | |
1498 | if (tmpmle->master == dlm->node_num) { | |
6714d8e8 KH |
1499 | mlog(ML_ERROR, "no owner on lockres, but this " |
1500 | "node is trying to migrate it to %u?!\n", | |
1501 | tmpmle->new_master); | |
1502 | BUG(); | |
1503 | } else { | |
1504 | /* the real master can respond on its own */ | |
1505 | response = DLM_MASTER_RESP_NO; | |
1506 | } | |
1507 | } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { | |
1508 | set_maybe = 0; | |
9c6510a5 | 1509 | if (tmpmle->master == dlm->node_num) { |
6714d8e8 | 1510 | response = DLM_MASTER_RESP_YES; |
9c6510a5 KH |
1511 | /* this node will be the owner. |
1512 | * go back and clean the mles on any | |
1513 | * other nodes */ | |
1514 | dispatch_assert = 1; | |
8d400b81 SM |
1515 | dlm_lockres_set_refmap_bit(dlm, res, |
1516 | request->node_idx); | |
9c6510a5 | 1517 | } else |
6714d8e8 KH |
1518 | response = DLM_MASTER_RESP_NO; |
1519 | } else { | |
1520 | // mlog(0, "this node is attempting to " | |
1521 | // "master lockres\n"); | |
1522 | response = DLM_MASTER_RESP_MAYBE; | |
1523 | } | |
1524 | if (set_maybe) | |
1525 | set_bit(request->node_idx, tmpmle->maybe_map); | |
1526 | spin_unlock(&tmpmle->spinlock); | |
1527 | ||
1528 | spin_unlock(&dlm->master_lock); | |
1529 | spin_unlock(&res->spinlock); | |
1530 | ||
1531 | /* keep the mle attached to heartbeat events */ | |
1532 | dlm_put_mle(tmpmle); | |
1533 | if (mle) | |
1534 | kmem_cache_free(dlm_mle_cache, mle); | |
1535 | goto send_response; | |
1536 | } | |
1537 | ||
1538 | /* | |
1539 | * lockres doesn't exist on this node | |
1540 | * if there is an MLE_BLOCK, return NO | |
1541 | * if there is an MLE_MASTER, return MAYBE | |
1542 | * otherwise, add an MLE_BLOCK, return NO | |
1543 | */ | |
1544 | spin_lock(&dlm->master_lock); | |
1545 | found = dlm_find_mle(dlm, &tmpmle, name, namelen); | |
1546 | if (!found) { | |
1547 | /* this lockid has never been seen on this node yet */ | |
1548 | // mlog(0, "no mle found\n"); | |
1549 | if (!mle) { | |
1550 | spin_unlock(&dlm->master_lock); | |
1551 | spin_unlock(&dlm->spinlock); | |
1552 | ||
3914ed0c | 1553 | mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); |
6714d8e8 | 1554 | if (!mle) { |
6714d8e8 | 1555 | response = DLM_MASTER_RESP_ERROR; |
9c6510a5 | 1556 | mlog_errno(-ENOMEM); |
6714d8e8 KH |
1557 | goto send_response; |
1558 | } | |
6714d8e8 KH |
1559 | goto way_up_top; |
1560 | } | |
1561 | ||
1562 | // mlog(0, "this is second time thru, already allocated, " | |
1563 | // "add the block.\n"); | |
41b8c8a1 | 1564 | dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); |
6714d8e8 | 1565 | set_bit(request->node_idx, mle->maybe_map); |
1c084577 | 1566 | __dlm_insert_mle(dlm, mle); |
6714d8e8 KH |
1567 | response = DLM_MASTER_RESP_NO; |
1568 | } else { | |
1569 | // mlog(0, "mle was found\n"); | |
1570 | set_maybe = 1; | |
1571 | spin_lock(&tmpmle->spinlock); | |
9c6510a5 KH |
1572 | if (tmpmle->master == dlm->node_num) { |
1573 | mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n"); | |
1574 | BUG(); | |
1575 | } | |
6714d8e8 KH |
1576 | if (tmpmle->type == DLM_MLE_BLOCK) |
1577 | response = DLM_MASTER_RESP_NO; | |
1578 | else if (tmpmle->type == DLM_MLE_MIGRATION) { | |
1579 | mlog(0, "migration mle was found (%u->%u)\n", | |
1580 | tmpmle->master, tmpmle->new_master); | |
6714d8e8 KH |
1581 | /* real master can respond on its own */ |
1582 | response = DLM_MASTER_RESP_NO; | |
9c6510a5 KH |
1583 | } else |
1584 | response = DLM_MASTER_RESP_MAYBE; | |
6714d8e8 KH |
1585 | if (set_maybe) |
1586 | set_bit(request->node_idx, tmpmle->maybe_map); | |
1587 | spin_unlock(&tmpmle->spinlock); | |
1588 | } | |
1589 | spin_unlock(&dlm->master_lock); | |
1590 | spin_unlock(&dlm->spinlock); | |
1591 | ||
1592 | if (found) { | |
1593 | /* keep the mle attached to heartbeat events */ | |
1594 | dlm_put_mle(tmpmle); | |
1595 | } | |
1596 | send_response: | |
b31cfc02 SM |
1597 | /* |
1598 | * __dlm_lookup_lockres() grabbed a reference to this lockres. | |
1599 | * The reference is released by dlm_assert_master_worker() under | |
1600 | * the call to dlm_dispatch_assert_master(). If | |
1601 | * dlm_assert_master_worker() isn't called, we drop it here. | |
1602 | */ | |
9c6510a5 KH |
1603 | if (dispatch_assert) { |
1604 | if (response != DLM_MASTER_RESP_YES) | |
1605 | mlog(ML_ERROR, "invalid response %d\n", response); | |
1606 | if (!res) { | |
1607 | mlog(ML_ERROR, "bad lockres while trying to assert!\n"); | |
1608 | BUG(); | |
1609 | } | |
1610 | mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", | |
1611 | dlm->node_num, res->lockname.len, res->lockname.name); | |
2bd63216 | 1612 | ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, |
9c6510a5 KH |
1613 | DLM_ASSERT_MASTER_MLE_CLEANUP); |
1614 | if (ret < 0) { | |
1615 | mlog(ML_ERROR, "failed to dispatch assert master work\n"); | |
1616 | response = DLM_MASTER_RESP_ERROR; | |
b31cfc02 | 1617 | dlm_lockres_put(res); |
9c6510a5 | 1618 | } |
b31cfc02 SM |
1619 | } else { |
1620 | if (res) | |
1621 | dlm_lockres_put(res); | |
9c6510a5 KH |
1622 | } |
1623 | ||
6714d8e8 KH |
1624 | dlm_put(dlm); |
1625 | return response; | |
1626 | } | |
1627 | ||
1628 | /* | |
1629 | * DLM_ASSERT_MASTER_MSG | |
1630 | */ | |
1631 | ||
1632 | ||
1633 | /* | |
1634 | * NOTE: this can be used for debugging | |
1635 | * can periodically run all locks owned by this node | |
1636 | * and re-assert across the cluster... | |
1637 | */ | |
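/*
 * Rough sketch of the mastery message flow around the handlers in this
 * file (a summary for orientation, not a normative description):
 *
 *   1. A node wanting to master a lockres sends a master request; each
 *      node answers DLM_MASTER_RESP_YES/NO/MAYBE/ERROR, and the owner
 *      may dispatch assert-master work (dlm_dispatch_assert_master).
 *   2. The owner sends DLM_ASSERT_MASTER_MSG to the other nodes via
 *      dlm_do_assert_master() below; each reply is a bitmask that may
 *      include DLM_ASSERT_RESPONSE_REASSERT (re-assert needed) and
 *      DLM_ASSERT_RESPONSE_MASTERY_REF (set that node in the refmap).
 */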
05488bbe AB |
1638 | static int dlm_do_assert_master(struct dlm_ctxt *dlm, |
1639 | struct dlm_lock_resource *res, | |
1640 | void *nodemap, u32 flags) | |
6714d8e8 KH |
1641 | { |
1642 | struct dlm_assert_master assert; | |
1643 | int to, tmpret; | |
1644 | struct dlm_node_iter iter; | |
1645 | int ret = 0; | |
9c6510a5 | 1646 | int reassert; |
ba2bf218 KH |
1647 | const char *lockname = res->lockname.name; |
1648 | unsigned int namelen = res->lockname.len; | |
6714d8e8 KH |
1649 | |
1650 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); | |
f3f85464 SM |
1651 | |
1652 | spin_lock(&res->spinlock); | |
1653 | res->state |= DLM_LOCK_RES_SETREF_INPROG; | |
1654 | spin_unlock(&res->spinlock); | |
1655 | ||
9c6510a5 KH |
1656 | again: |
1657 | reassert = 0; | |
6714d8e8 KH |
1658 | |
1659 | /* note that if this nodemap is empty, it returns 0 */ | |
1660 | dlm_node_iter_init(nodemap, &iter); | |
1661 | while ((to = dlm_node_iter_next(&iter)) >= 0) { | |
1662 | int r = 0; | |
a9ee4c8a KH |
1663 | struct dlm_master_list_entry *mle = NULL; |
1664 | ||
6714d8e8 KH |
1665 | mlog(0, "sending assert master to %d (%.*s)\n", to, |
1666 | namelen, lockname); | |
1667 | memset(&assert, 0, sizeof(assert)); | |
1668 | assert.node_idx = dlm->node_num; | |
1669 | assert.namelen = namelen; | |
1670 | memcpy(assert.name, lockname, namelen); | |
1671 | assert.flags = cpu_to_be32(flags); | |
1672 | ||
1673 | tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, | |
1674 | &assert, sizeof(assert), to, &r); | |
1675 | if (tmpret < 0) { | |
a5196ec5 WW |
1676 | mlog(ML_ERROR, "Error %d when sending message %u (key " |
1677 | "0x%x) to node %u\n", tmpret, | |
1678 | DLM_ASSERT_MASTER_MSG, dlm->key, to); | |
6714d8e8 | 1679 | if (!dlm_is_host_down(tmpret)) { |
3b3b84a8 | 1680 | mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); |
6714d8e8 KH |
1681 | BUG(); |
1682 | } | |
1683 | /* a node died. finish out the rest of the nodes. */ | |
3b3b84a8 | 1684 | mlog(0, "link to %d went down!\n", to); |
6714d8e8 KH |
1685 | /* any nonzero status return will do */ |
1686 | ret = tmpret; | |
ba2bf218 | 1687 | r = 0; |
6714d8e8 KH |
1688 | } else if (r < 0) { |
1689 | /* ok, something is horribly messed up. kill thyself. */ | |
1690 | mlog(ML_ERROR,"during assert master of %.*s to %u, " | |
1691 | "got %d.\n", namelen, lockname, to, r); | |
a9ee4c8a KH |
1692 | spin_lock(&dlm->spinlock); |
1693 | spin_lock(&dlm->master_lock); | |
1694 | if (dlm_find_mle(dlm, &mle, (char *)lockname, | |
1695 | namelen)) { | |
1696 | dlm_print_one_mle(mle); | |
1697 | __dlm_put_mle(mle); | |
1698 | } | |
1699 | spin_unlock(&dlm->master_lock); | |
1700 | spin_unlock(&dlm->spinlock); | |
6714d8e8 | 1701 | BUG(); |
ba2bf218 KH |
1702 | } |
1703 | ||
1704 | if (r & DLM_ASSERT_RESPONSE_REASSERT && | |
1705 | !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) { | |
1706 | mlog(ML_ERROR, "%.*s: very strange, " | |
1707 | "master MLE but no lockres on %u\n", | |
1708 | namelen, lockname, to); | |
1709 | } | |
1710 | ||
1711 | if (r & DLM_ASSERT_RESPONSE_REASSERT) { | |
9c6510a5 | 1712 | mlog(0, "%.*s: node %u created mles on other " |
2bd63216 | 1713 | "nodes and requests a re-assert\n", |
9c6510a5 KH |
1714 | namelen, lockname, to); |
1715 | reassert = 1; | |
6714d8e8 | 1716 | } |
ba2bf218 KH |
1717 | if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) { |
1718 | mlog(0, "%.*s: node %u has a reference to this " | |
1719 | "lockres, set the bit in the refmap\n", | |
1720 | namelen, lockname, to); | |
1721 | spin_lock(&res->spinlock); | |
8d400b81 | 1722 | dlm_lockres_set_refmap_bit(dlm, res, to); |
ba2bf218 KH |
1723 | spin_unlock(&res->spinlock); |
1724 | } | |
6714d8e8 KH |
1725 | } |
1726 | ||
9c6510a5 KH |
1727 | if (reassert) |
1728 | goto again; | |
1729 | ||
f3f85464 SM |
1730 | spin_lock(&res->spinlock); |
1731 | res->state &= ~DLM_LOCK_RES_SETREF_INPROG; | |
1732 | spin_unlock(&res->spinlock); | |
1733 | wake_up(&res->wq); | |
1734 | ||
6714d8e8 KH |
1735 | return ret; |
1736 | } | |
1737 | ||
1738 | /* | |
1739 | * locks that can be taken here: | |
1740 | * dlm->spinlock | |
1741 | * res->spinlock | |
1742 | * mle->spinlock | |
1743 | * dlm->master_list | |
1744 | * | |
1745 | * if possible, TRIM THIS DOWN!!! | |
1746 | */ | |
d74c9803 KH |
1747 | int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, |
1748 | void **ret_data) | |
6714d8e8 KH |
1749 | { |
1750 | struct dlm_ctxt *dlm = data; | |
1751 | struct dlm_master_list_entry *mle = NULL; | |
1752 | struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; | |
1753 | struct dlm_lock_resource *res = NULL; | |
1754 | char *name; | |
a3d33291 | 1755 | unsigned int namelen, hash; |
6714d8e8 | 1756 | u32 flags; |
ba2bf218 | 1757 | int master_request = 0, have_lockres_ref = 0; |
9c6510a5 | 1758 | int ret = 0; |
6714d8e8 KH |
1759 | |
1760 | if (!dlm_grab(dlm)) | |
1761 | return 0; | |
1762 | ||
1763 | name = assert->name; | |
1764 | namelen = assert->namelen; | |
a3d33291 | 1765 | hash = dlm_lockid_hash(name, namelen); |
6714d8e8 KH |
1766 | flags = be32_to_cpu(assert->flags); |
1767 | ||
1768 | if (namelen > DLM_LOCKID_NAME_MAX) { | |
1769 | mlog(ML_ERROR, "Invalid name length!"); | |
1770 | goto done; | |
1771 | } | |
1772 | ||
1773 | spin_lock(&dlm->spinlock); | |
1774 | ||
1775 | if (flags) | |
1776 | mlog(0, "assert_master with flags: %u\n", flags); | |
1777 | ||
1778 | /* find the MLE */ | |
1779 | spin_lock(&dlm->master_lock); | |
1780 | if (!dlm_find_mle(dlm, &mle, name, namelen)) { | |
1781 | /* not an error, could be master just re-asserting */ | |
1782 | mlog(0, "just got an assert_master from %u, but no " | |
1783 | "MLE for it! (%.*s)\n", assert->node_idx, | |
1784 | namelen, name); | |
1785 | } else { | |
1786 | int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); | |
1787 | if (bit >= O2NM_MAX_NODES) { | |
1788 | /* not necessarily an error, though less likely. | |
1789 | * could be master just re-asserting. */ | |
aa852354 | 1790 | mlog(0, "no bits set in the maybe_map, but %u " |
6714d8e8 KH |
1791 | "is asserting! (%.*s)\n", assert->node_idx, |
1792 | namelen, name); | |
1793 | } else if (bit != assert->node_idx) { | |
1794 | if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { | |
1795 | mlog(0, "master %u was found, %u should " | |
1796 | "back off\n", assert->node_idx, bit); | |
1797 | } else { | |
1798 | /* with the fix for bug 569, a higher node | |
1799 | * number winning the mastery will respond | |
1800 | * YES to mastery requests, but this node | |
1801 | * had no way of knowing. let it pass. */ | |
aa852354 | 1802 | mlog(0, "%u is the lowest node, " |
6714d8e8 KH |
1803 | "%u is asserting. (%.*s) %u must " |
1804 | "have begun after %u won.\n", bit, | |
1805 | assert->node_idx, namelen, name, bit, | |
1806 | assert->node_idx); | |
1807 | } | |
1808 | } | |
2d1a868c KH |
1809 | if (mle->type == DLM_MLE_MIGRATION) { |
1810 | if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { | |
1811 | mlog(0, "%s:%.*s: got cleanup assert" | |
1812 | " from %u for migration\n", | |
1813 | dlm->name, namelen, name, | |
1814 | assert->node_idx); | |
1815 | } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { | |
1816 | mlog(0, "%s:%.*s: got unrelated assert" | |
1817 | " from %u for migration, ignoring\n", | |
1818 | dlm->name, namelen, name, | |
1819 | assert->node_idx); | |
1820 | __dlm_put_mle(mle); | |
1821 | spin_unlock(&dlm->master_lock); | |
1822 | spin_unlock(&dlm->spinlock); | |
1823 | goto done; | |
2bd63216 | 1824 | } |
2d1a868c | 1825 | } |
6714d8e8 KH |
1826 | } |
1827 | spin_unlock(&dlm->master_lock); | |
1828 | ||
1829 | /* ok everything checks out with the MLE | |
1830 | * now check to see if there is a lockres */ | |
a3d33291 | 1831 | res = __dlm_lookup_lockres(dlm, name, namelen, hash); |
6714d8e8 KH |
1832 | if (res) { |
1833 | spin_lock(&res->spinlock); | |
1834 | if (res->state & DLM_LOCK_RES_RECOVERING) { | |
1835 | mlog(ML_ERROR, "%u asserting but %.*s is " | |
1836 | "RECOVERING!\n", assert->node_idx, namelen, name); | |
1837 | goto kill; | |
1838 | } | |
1839 | if (!mle) { | |
dc2ed195 KH |
1840 | if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && |
1841 | res->owner != assert->node_idx) { | |
53ecd25e SM |
1842 | mlog(ML_ERROR, "DIE! Mastery assert from %u, " |
1843 | "but current owner is %u! (%.*s)\n", | |
1844 | assert->node_idx, res->owner, namelen, | |
1845 | name); | |
1846 | __dlm_print_one_lock_resource(res); | |
1847 | BUG(); | |
6714d8e8 KH |
1848 | } |
1849 | } else if (mle->type != DLM_MLE_MIGRATION) { | |
1850 | if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | |
1851 | /* owner is just re-asserting */ | |
1852 | if (res->owner == assert->node_idx) { | |
1853 | mlog(0, "owner %u re-asserting on " | |
1854 | "lock %.*s\n", assert->node_idx, | |
1855 | namelen, name); | |
1856 | goto ok; | |
1857 | } | |
1858 | mlog(ML_ERROR, "got assert_master from " | |
1859 | "node %u, but %u is the owner! " | |
1860 | "(%.*s)\n", assert->node_idx, | |
1861 | res->owner, namelen, name); | |
1862 | goto kill; | |
1863 | } | |
1864 | if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { | |
1865 | mlog(ML_ERROR, "got assert from %u, but lock " | |
1866 | "with no owner should be " | |
1867 | "in-progress! (%.*s)\n", | |
1868 | assert->node_idx, | |
1869 | namelen, name); | |
1870 | goto kill; | |
1871 | } | |
1872 | } else /* mle->type == DLM_MLE_MIGRATION */ { | |
1873 | /* should only be getting an assert from new master */ | |
1874 | if (assert->node_idx != mle->new_master) { | |
1875 | mlog(ML_ERROR, "got assert from %u, but " | |
1876 | "new master is %u, and old master " | |
1877 | "was %u (%.*s)\n", | |
1878 | assert->node_idx, mle->new_master, | |
1879 | mle->master, namelen, name); | |
1880 | goto kill; | |
1881 | } | |
1882 | ||
1883 | } | |
1884 | ok: | |
1885 | spin_unlock(&res->spinlock); | |
1886 | } | |
6714d8e8 KH |
1887 | |
1888 | // mlog(0, "woo! got an assert_master from node %u!\n", | |
1889 | // assert->node_idx); | |
1890 | if (mle) { | |
9c6510a5 KH |
1891 | int extra_ref = 0; |
1892 | int nn = -1; | |
a2bf0477 | 1893 | int rr, err = 0; |
2bd63216 | 1894 | |
6714d8e8 | 1895 | spin_lock(&mle->spinlock); |
9c6510a5 KH |
1896 | if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) |
1897 | extra_ref = 1; | |
1898 | else { | |
1899 | /* MASTER mle: if any bits are set in the response map | |
1900 | * then the calling node needs to re-assert to clear | |
1901 | * up nodes that this node contacted */ | |
2bd63216 | 1902 | while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, |
9c6510a5 KH |
1903 | nn+1)) < O2NM_MAX_NODES) { |
1904 | if (nn != dlm->node_num && nn != assert->node_idx) | |
1905 | master_request = 1; | |
1906 | } | |
1907 | } | |
6714d8e8 KH |
1908 | mle->master = assert->node_idx; |
1909 | atomic_set(&mle->woken, 1); | |
1910 | wake_up(&mle->wq); | |
1911 | spin_unlock(&mle->spinlock); | |
1912 | ||
a2bf0477 | 1913 | if (res) { |
a6fa3640 | 1914 | int wake = 0; |
6714d8e8 | 1915 | spin_lock(&res->spinlock); |
a2bf0477 KH |
1916 | if (mle->type == DLM_MLE_MIGRATION) { |
1917 | mlog(0, "finishing off migration of lockres %.*s, " | |
1918 | "from %u to %u\n", | |
1919 | res->lockname.len, res->lockname.name, | |
1920 | dlm->node_num, mle->new_master); | |
1921 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
a6fa3640 | 1922 | wake = 1; |
a2bf0477 KH |
1923 | dlm_change_lockres_owner(dlm, res, mle->new_master); |
1924 | BUG_ON(res->state & DLM_LOCK_RES_DIRTY); | |
1925 | } else { | |
1926 | dlm_change_lockres_owner(dlm, res, mle->master); | |
1927 | } | |
6714d8e8 | 1928 | spin_unlock(&res->spinlock); |
ba2bf218 | 1929 | have_lockres_ref = 1; |
a6fa3640 KH |
1930 | if (wake) |
1931 | wake_up(&res->wq); | |
6714d8e8 | 1932 | } |
a2bf0477 KH |
1933 | |
1934 | /* master is known, detach if not already detached. | |
1935 | * ensures that only one assert_master call will happen | |
1936 | * on this mle. */ | |
a2bf0477 KH |
1937 | spin_lock(&dlm->master_lock); |
1938 | ||
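/*
 * Sanity check on the mle reference count (a reading of the checks
 * below): the minimum expected count is one baseline reference, plus
 * one if the mle is marked inuse, plus one more when this mle type
 * (BLOCK/MIGRATION) carries the extra ref noted above. A lower count
 * suggests an imbalance and is only logged, not acted upon.
 */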
1939 | rr = atomic_read(&mle->mle_refs.refcount); | |
1940 | if (mle->inuse > 0) { | |
1941 | if (extra_ref && rr < 3) | |
1942 | err = 1; | |
1943 | else if (!extra_ref && rr < 2) | |
1944 | err = 1; | |
1945 | } else { | |
1946 | if (extra_ref && rr < 2) | |
1947 | err = 1; | |
1948 | else if (!extra_ref && rr < 1) | |
1949 | err = 1; | |
1950 | } | |
1951 | if (err) { | |
1952 | mlog(ML_ERROR, "%s:%.*s: got assert master from %u " | |
1953 | "that will mess up this node, refs=%d, extra=%d, " | |
1954 | "inuse=%d\n", dlm->name, namelen, name, | |
1955 | assert->node_idx, rr, extra_ref, mle->inuse); | |
1956 | dlm_print_one_mle(mle); | |
1957 | } | |
1c084577 | 1958 | __dlm_unlink_mle(dlm, mle); |
a2bf0477 KH |
1959 | __dlm_mle_detach_hb_events(dlm, mle); |
1960 | __dlm_put_mle(mle); | |
6714d8e8 KH |
1961 | if (extra_ref) { |
1962 | /* the assert master message now balances the extra | |
1963 | * ref given by the master / migration request message. | |
1964 | * if this is the last put, it will be removed | |
1965 | * from the list. */ | |
a2bf0477 KH |
1966 | __dlm_put_mle(mle); |
1967 | } | |
1968 | spin_unlock(&dlm->master_lock); | |
a2bf0477 KH |
1969 | } else if (res) { |
1970 | if (res->owner != assert->node_idx) { | |
1971 | mlog(0, "assert_master from %u, but current " | |
1972 | "owner is %u (%.*s), no mle\n", assert->node_idx, | |
1973 | res->owner, namelen, name); | |
6714d8e8 KH |
1974 | } |
1975 | } | |
14741472 | 1976 | spin_unlock(&dlm->spinlock); |
6714d8e8 KH |
1977 | |
1978 | done: | |
9c6510a5 | 1979 | ret = 0; |
3b8118cf KH |
1980 | if (res) { |
1981 | spin_lock(&res->spinlock); | |
1982 | res->state |= DLM_LOCK_RES_SETREF_INPROG; | |
1983 | spin_unlock(&res->spinlock); | |
1984 | *ret_data = (void *)res; | |
1985 | } | |
6714d8e8 | 1986 | dlm_put(dlm); |
9c6510a5 KH |
1987 | if (master_request) { |
1988 | mlog(0, "need to tell master to reassert\n"); | |
ba2bf218 KH |
1989 | /* positive. negative would shoot down the node. */ |
1990 | ret |= DLM_ASSERT_RESPONSE_REASSERT; | |
1991 | if (!have_lockres_ref) { | |
1992 | mlog(ML_ERROR, "strange, got assert from %u, MASTER " | |
1993 | "mle present here for %s:%.*s, but no lockres!\n", | |
1994 | assert->node_idx, dlm->name, namelen, name); | |
1995 | } | |
1996 | } | |
1997 | if (have_lockres_ref) { | |
1998 | /* let the master know we have a reference to the lockres */ | |
1999 | ret |= DLM_ASSERT_RESPONSE_MASTERY_REF; | |
2000 | mlog(0, "%s:%.*s: got assert from %u, need a ref\n", | |
2001 | dlm->name, namelen, name, assert->node_idx); | |
9c6510a5 KH |
2002 | } |
2003 | return ret; | |
6714d8e8 KH |
2004 | |
2005 | kill: | |
2006 | /* kill the caller! */ | |
a9ee4c8a KH |
2007 | mlog(ML_ERROR, "Bad message received from another node. Dumping state " |
2008 | "and killing the other node now! This node is OK and can continue.\n"); | |
2009 | __dlm_print_one_lock_resource(res); | |
6714d8e8 KH |
2010 | spin_unlock(&res->spinlock); |
2011 | spin_unlock(&dlm->spinlock); | |
2bd63216 | 2012 | *ret_data = (void *)res; |
6714d8e8 KH |
2013 | dlm_put(dlm); |
2014 | return -EINVAL; | |
2015 | } | |
2016 | ||
3b8118cf KH |
2017 | void dlm_assert_master_post_handler(int status, void *data, void *ret_data) |
2018 | { | |
2019 | struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; | |
2020 | ||
2021 | if (ret_data) { | |
2022 | spin_lock(&res->spinlock); | |
2023 | res->state &= ~DLM_LOCK_RES_SETREF_INPROG; | |
2024 | spin_unlock(&res->spinlock); | |
2025 | wake_up(&res->wq); | |
2026 | dlm_lockres_put(res); | |
2027 | } | |
2028 | return; | |
2029 | } | |
2030 | ||
6714d8e8 KH |
2031 | int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, |
2032 | struct dlm_lock_resource *res, | |
2033 | int ignore_higher, u8 request_from, u32 flags) | |
2034 | { | |
2035 | struct dlm_work_item *item; | |
cd861280 | 2036 | item = kzalloc(sizeof(*item), GFP_NOFS); |
6714d8e8 KH |
2037 | if (!item) |
2038 | return -ENOMEM; | |
2039 | ||
2040 | ||
2041 | /* queue up work for dlm_assert_master_worker */ | |
2042 | dlm_grab(dlm); /* get an extra ref for the work item */ | |
2043 | dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); | |
2044 | item->u.am.lockres = res; /* already have a ref */ | |
2045 | /* can optionally ignore node numbers higher than this node */ | |
2046 | item->u.am.ignore_higher = ignore_higher; | |
2047 | item->u.am.request_from = request_from; | |
2048 | item->u.am.flags = flags; | |
2049 | ||
2bd63216 SM |
2050 | if (ignore_higher) |
2051 | mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, | |
9c6510a5 | 2052 | res->lockname.name); |
2bd63216 | 2053 | |
6714d8e8 KH |
2054 | spin_lock(&dlm->work_lock); |
2055 | list_add_tail(&item->list, &dlm->work_list); | |
2056 | spin_unlock(&dlm->work_lock); | |
2057 | ||
3156d267 | 2058 | queue_work(dlm->dlm_worker, &dlm->dispatched_work); |
6714d8e8 KH |
2059 | return 0; |
2060 | } | |
2061 | ||
2062 | static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) | |
2063 | { | |
2064 | struct dlm_ctxt *dlm = data; | |
2065 | int ret = 0; | |
2066 | struct dlm_lock_resource *res; | |
2067 | unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | |
2068 | int ignore_higher; | |
2069 | int bit; | |
2070 | u8 request_from; | |
2071 | u32 flags; | |
2072 | ||
2073 | dlm = item->dlm; | |
2074 | res = item->u.am.lockres; | |
2075 | ignore_higher = item->u.am.ignore_higher; | |
2076 | request_from = item->u.am.request_from; | |
2077 | flags = item->u.am.flags; | |
2078 | ||
2079 | spin_lock(&dlm->spinlock); | |
2080 | memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); | |
2081 | spin_unlock(&dlm->spinlock); | |
2082 | ||
2083 | clear_bit(dlm->node_num, nodemap); | |
2084 | if (ignore_higher) { | |
2085 | /* if this is just to clear up mles for nodes below | |
2086 | * this node, do not send the message to the original | |
2087 | * caller or any node number higher than this */ | |
2088 | clear_bit(request_from, nodemap); | |
2089 | bit = dlm->node_num; | |
2090 | while (1) { | |
2091 | bit = find_next_bit(nodemap, O2NM_MAX_NODES, | |
2092 | bit+1); | |
2093 | if (bit >= O2NM_MAX_NODES) | |
2094 | break; | |
2095 | clear_bit(bit, nodemap); | |
2096 | } | |
2097 | } | |
2098 | ||
36407488 KH |
2099 | /* |
2100 | * If we're migrating this lock to someone else, we are no | |
2101 | * longer allowed to assert our own mastery. OTOH, we need to | |
2102 | * prevent migration from starting while we're still asserting | |
2103 | * our dominance. The reserved ast delays migration. | |
2104 | */ | |
2105 | spin_lock(&res->spinlock); | |
2106 | if (res->state & DLM_LOCK_RES_MIGRATING) { | |
2107 | mlog(0, "Someone asked us to assert mastery, but we're " | |
2108 | "in the middle of migration. Skipping assert, " | |
2109 | "the new master will handle that.\n"); | |
2110 | spin_unlock(&res->spinlock); | |
2111 | goto put; | |
2112 | } else | |
2113 | __dlm_lockres_reserve_ast(res); | |
2114 | spin_unlock(&res->spinlock); | |
2115 | ||
6714d8e8 KH |
2116 | /* this call now finishes out the nodemap |
2117 | * even if one or more nodes die */ | |
2118 | mlog(0, "worker about to master %.*s here, this=%u\n", | |
2119 | res->lockname.len, res->lockname.name, dlm->node_num); | |
ba2bf218 | 2120 | ret = dlm_do_assert_master(dlm, res, nodemap, flags); |
6714d8e8 KH |
2121 | if (ret < 0) { |
2122 | /* no need to restart, we are done */ | |
3b3b84a8 KH |
2123 | if (!dlm_is_host_down(ret)) |
2124 | mlog_errno(ret); | |
6714d8e8 KH |
2125 | } |
2126 | ||
36407488 KH |
2127 | /* Ok, we've asserted ourselves. Let's let migration start. */ |
2128 | dlm_lockres_release_ast(dlm, res); | |
2129 | ||
2130 | put: | |
6714d8e8 KH |
2131 | dlm_lockres_put(res); |
2132 | ||
2133 | mlog(0, "finished with dlm_assert_master_worker\n"); | |
2134 | } | |
2135 | ||
c03872f5 KH |
2136 | /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread. |
2137 | * We cannot wait for node recovery to complete to begin mastering this | |
2138 | * lockres because this lockres is used to kick off recovery! ;-) | |
2139 | * So, do a pre-check on all living nodes to see if any of those nodes | |
2140 | * think that $RECOVERY is currently mastered by a dead node. If so, | |
2141 | * we wait a short time to allow that node to get notified by its own | |
2142 | * heartbeat stack, then check again. All $RECOVERY lock resources | |
2bd63216 | 2143 |  * mastered by dead nodes are purged when the heartbeat callback is |
c03872f5 KH |
2144 | * fired, so we can know for sure that it is safe to continue once |
2145 | * the node returns a live node or no node. */ | |
2146 | static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, | |
2147 | struct dlm_lock_resource *res) | |
2148 | { | |
2149 | struct dlm_node_iter iter; | |
2150 | int nodenum; | |
2151 | int ret = 0; | |
2152 | u8 master = DLM_LOCK_RES_OWNER_UNKNOWN; | |
2153 | ||
2154 | spin_lock(&dlm->spinlock); | |
2155 | dlm_node_iter_init(dlm->domain_map, &iter); | |
2156 | spin_unlock(&dlm->spinlock); | |
2157 | ||
2158 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | |
2159 | /* do not send to self */ | |
2160 | if (nodenum == dlm->node_num) | |
2161 | continue; | |
2162 | ret = dlm_do_master_requery(dlm, res, nodenum, &master); | |
2163 | if (ret < 0) { | |
2164 | mlog_errno(ret); | |
2165 | if (!dlm_is_host_down(ret)) | |
2166 | BUG(); | |
2167 | /* host is down, so answer for that node would be | |
2168 | * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ | |
f42a100b | 2169 | ret = 0; |
c03872f5 KH |
2170 | } |
2171 | ||
2172 | if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { | |
2173 | /* check to see if this master is in the recovery map */ | |
2174 | spin_lock(&dlm->spinlock); | |
2175 | if (test_bit(master, dlm->recovery_map)) { | |
2176 | mlog(ML_NOTICE, "%s: node %u has not seen " | |
2177 | "node %u go down yet, and thinks the " | |
2178 | "dead node is mastering the recovery " | |
2179 | "lock. must wait.\n", dlm->name, | |
2180 | nodenum, master); | |
2181 | ret = -EAGAIN; | |
2182 | } | |
2183 | spin_unlock(&dlm->spinlock); | |
2bd63216 | 2184 | mlog(0, "%s: reco lock master is %u\n", dlm->name, |
c03872f5 KH |
2185 | master); |
2186 | break; | |
2187 | } | |
2188 | } | |
2189 | return ret; | |
2190 | } | |
2191 | ||
ba2bf218 KH |
2192 | /* |
2193 | * DLM_DEREF_LOCKRES_MSG | |
2194 | */ | |
2195 | ||
2196 | int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | |
2197 | { | |
2198 | struct dlm_deref_lockres deref; | |
2199 | int ret = 0, r; | |
2200 | const char *lockname; | |
2201 | unsigned int namelen; | |
2202 | ||
2203 | lockname = res->lockname.name; | |
2204 | namelen = res->lockname.len; | |
2205 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); | |
2206 | ||
ba2bf218 KH |
2207 | memset(&deref, 0, sizeof(deref)); |
2208 | deref.node_idx = dlm->node_num; | |
2209 | deref.namelen = namelen; | |
2210 | memcpy(deref.name, lockname, namelen); | |
2211 | ||
2212 | ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, | |
2213 | &deref, sizeof(deref), res->owner, &r); | |
2214 | if (ret < 0) | |
8decab3c SM |
2215 | mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", |
2216 | dlm->name, namelen, lockname, ret, res->owner); | |
ba2bf218 KH |
2217 | else if (r < 0) { |
2218 | /* BAD. other node says I did not have a ref. */ | |
8decab3c SM |
2219 | mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", |
2220 | dlm->name, namelen, lockname, res->owner, r); | |
ba2bf218 KH |
2221 | dlm_print_one_lock_resource(res); |
2222 | BUG(); | |
2223 | } | |
2224 | return ret; | |
2225 | } | |
2226 | ||
d74c9803 KH |
2227 | int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, |
2228 | void **ret_data) | |
ba2bf218 KH |
2229 | { |
2230 | struct dlm_ctxt *dlm = data; | |
2231 | struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; | |
2232 | struct dlm_lock_resource *res = NULL; | |
2233 | char *name; | |
2234 | unsigned int namelen; | |
2235 | int ret = -EINVAL; | |
2236 | u8 node; | |
2237 | unsigned int hash; | |
f3f85464 SM |
2238 | struct dlm_work_item *item; |
2239 | int cleared = 0; | |
2240 | int dispatch = 0; | |
ba2bf218 KH |
2241 | |
2242 | if (!dlm_grab(dlm)) | |
2243 | return 0; | |
2244 | ||
2245 | name = deref->name; | |
2246 | namelen = deref->namelen; | |
2247 | node = deref->node_idx; | |
2248 | ||
2249 | if (namelen > DLM_LOCKID_NAME_MAX) { | |
2250 | mlog(ML_ERROR, "Invalid name length!"); | |
2251 | goto done; | |
2252 | } | |
2253 | if (deref->node_idx >= O2NM_MAX_NODES) { | |
2254 | mlog(ML_ERROR, "Invalid node number: %u\n", node); | |
2255 | goto done; | |
2256 | } | |
2257 | ||
2258 | hash = dlm_lockid_hash(name, namelen); | |
2259 | ||
2260 | spin_lock(&dlm->spinlock); | |
2261 | res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); | |
2262 | if (!res) { | |
2263 | spin_unlock(&dlm->spinlock); | |
2264 | mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", | |
2265 | dlm->name, namelen, name); | |
2266 | goto done; | |
2267 | } | |
2268 | spin_unlock(&dlm->spinlock); | |
2269 | ||
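/*
 * If an assert_master is still recording its reference for this
 * lockres (DLM_LOCK_RES_SETREF_INPROG), the deref cannot be applied
 * here; it is handed to dlm_deref_lockres_worker(), which waits for
 * that flag to clear before dropping the refmap bit. Otherwise the
 * bit is cleared directly under the lockres spinlock.
 */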
2270 | spin_lock(&res->spinlock); | |
f3f85464 SM |
2271 | if (res->state & DLM_LOCK_RES_SETREF_INPROG) |
2272 | dispatch = 1; | |
2273 | else { | |
2274 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); | |
2275 | if (test_bit(node, res->refmap)) { | |
8d400b81 | 2276 | dlm_lockres_clear_refmap_bit(dlm, res, node); |
f3f85464 SM |
2277 | cleared = 1; |
2278 | } | |
ba2bf218 KH |
2279 | } |
2280 | spin_unlock(&res->spinlock); | |
2281 | ||
f3f85464 SM |
2282 | if (!dispatch) { |
2283 | if (cleared) | |
2284 | dlm_lockres_calc_usage(dlm, res); | |
2285 | else { | |
2286 | mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " | |
2287 | "but it is already dropped!\n", dlm->name, | |
2288 | res->lockname.len, res->lockname.name, node); | |
2af37ce8 | 2289 | dlm_print_one_lock_resource(res); |
f3f85464 SM |
2290 | } |
2291 | ret = 0; | |
2292 | goto done; | |
2293 | } | |
2294 | ||
2295 | item = kzalloc(sizeof(*item), GFP_NOFS); | |
2296 | if (!item) { | |
2297 | ret = -ENOMEM; | |
2298 | mlog_errno(ret); | |
2299 | goto done; | |
2300 | } | |
2301 | ||
2302 | dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL); | |
2303 | item->u.dl.deref_res = res; | |
2304 | item->u.dl.deref_node = node; | |
2305 | ||
2306 | spin_lock(&dlm->work_lock); | |
2307 | list_add_tail(&item->list, &dlm->work_list); | |
2308 | spin_unlock(&dlm->work_lock); | |
2309 | ||
2310 | queue_work(dlm->dlm_worker, &dlm->dispatched_work); | |
2311 | return 0; | |
2312 | ||
ba2bf218 KH |
2313 | done: |
2314 | if (res) | |
2315 | dlm_lockres_put(res); | |
2316 | dlm_put(dlm); | |
f3f85464 | 2317 | |
ba2bf218 KH |
2318 | return ret; |
2319 | } | |
2320 | ||
f3f85464 SM |
2321 | static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) |
2322 | { | |
2323 | struct dlm_ctxt *dlm; | |
2324 | struct dlm_lock_resource *res; | |
2325 | u8 node; | |
2326 | u8 cleared = 0; | |
2327 | ||
2328 | dlm = item->dlm; | |
2329 | res = item->u.dl.deref_res; | |
2330 | node = item->u.dl.deref_node; | |
2331 | ||
2332 | spin_lock(&res->spinlock); | |
2333 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); | |
2334 | if (test_bit(node, res->refmap)) { | |
2335 | __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); | |
8d400b81 | 2336 | dlm_lockres_clear_refmap_bit(dlm, res, node); |
f3f85464 SM |
2337 | cleared = 1; |
2338 | } | |
2339 | spin_unlock(&res->spinlock); | |
2340 | ||
2341 | if (cleared) { | |
2342 | mlog(0, "%s:%.*s node %u ref dropped in dispatch\n", | |
2343 | dlm->name, res->lockname.len, res->lockname.name, node); | |
2344 | dlm_lockres_calc_usage(dlm, res); | |
2345 | } else { | |
2346 | mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " | |
2347 | "but it is already dropped!\n", dlm->name, | |
2348 | res->lockname.len, res->lockname.name, node); | |
2af37ce8 | 2349 | dlm_print_one_lock_resource(res); |
f3f85464 SM |
2350 | } |
2351 | ||
2352 | dlm_lockres_put(res); | |
2353 | } | |
2354 | ||
9f62e960 SM |
2355 | /* |
2356 |  * A migrateable resource is one that is: | |
2357 |  * 1. locally mastered, and | |
2358 |  * 2. has zero local locks, and | |
2359 |  * 3. has one or more non-local locks, or one or more references | |
2360 | * Returns 1 if yes, 0 if not. | |
2f5bf1f2 SM |
2361 | */ |
2362 | static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, | |
9f62e960 | 2363 | struct dlm_lock_resource *res) |
2f5bf1f2 | 2364 | { |
9f62e960 SM |
2365 | enum dlm_lockres_list idx; |
2366 | int nonlocal = 0, node_ref; | |
800deef3 | 2367 | struct list_head *queue; |
2f5bf1f2 | 2368 | struct dlm_lock *lock; |
9f62e960 | 2369 | u64 cookie; |
2f5bf1f2 SM |
2370 | |
2371 | assert_spin_locked(&res->spinlock); | |
2372 | ||
9f62e960 SM |
2373 | if (res->owner != dlm->node_num) |
2374 | return 0; | |
2f5bf1f2 | 2375 | |
9f62e960 SM |
2376 | for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { |
2377 | queue = dlm_list_idx_to_ptr(res, idx); | |
800deef3 | 2378 | list_for_each_entry(lock, queue, list) { |
9f62e960 SM |
2379 | if (lock->ml.node != dlm->node_num) { |
2380 | nonlocal++; | |
2381 | continue; | |
2f5bf1f2 | 2382 | } |
9f62e960 SM |
2383 | cookie = be64_to_cpu(lock->ml.cookie); |
2384 | mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on " | |
2385 | "%s list\n", dlm->name, res->lockname.len, | |
2386 | res->lockname.name, | |
2387 | dlm_get_lock_cookie_node(cookie), | |
2388 | dlm_get_lock_cookie_seq(cookie), | |
2389 | dlm_list_in_text(idx)); | |
2390 | return 0; | |
2f5bf1f2 | 2391 | } |
2f5bf1f2 SM |
2392 | } |
2393 | ||
9f62e960 SM |
2394 | if (!nonlocal) { |
2395 | node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); | |
2396 | if (node_ref >= O2NM_MAX_NODES) | |
2397 | return 0; | |
2398 | } | |
388c4bcb | 2399 | |
9f62e960 SM |
2400 | mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len, |
2401 | res->lockname.name); | |
2f5bf1f2 | 2402 | |
9f62e960 | 2403 | return 1; |
2f5bf1f2 | 2404 | } |
6714d8e8 KH |
2405 | |
2406 | /* | |
2407 | * DLM_MIGRATE_LOCKRES | |
2408 | */ | |
2409 | ||
2410 | ||
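/*
 * Outline of dlm_migrate_lockres() below (a summary of its steps):
 *   1. preallocate the migratable lockres buffer and a migration mle
 *   2. add the migration mle, failing with -EEXIST if one is present
 *   3. set DLM_LOCK_RES_MIGRATING and flush pending asts
 *   4. send all lock state to the target (DLM_MRES_MIGRATION)
 *   5. wait for the target to assert mastery, then set the new owner
 *      and free the non-local dlm_lock structures
 */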
faf0ec9f | 2411 | static int dlm_migrate_lockres(struct dlm_ctxt *dlm, |
66effd3c | 2412 | struct dlm_lock_resource *res, u8 target) |
6714d8e8 KH |
2413 | { |
2414 | struct dlm_master_list_entry *mle = NULL; | |
2415 | struct dlm_master_list_entry *oldmle = NULL; | |
2416 | struct dlm_migratable_lockres *mres = NULL; | |
2f5bf1f2 | 2417 | int ret = 0; |
6714d8e8 KH |
2418 | const char *name; |
2419 | unsigned int namelen; | |
2420 | int mle_added = 0; | |
2f5bf1f2 | 2421 | int wake = 0; |
6714d8e8 KH |
2422 | |
2423 | if (!dlm_grab(dlm)) | |
2424 | return -EINVAL; | |
2425 | ||
66effd3c SM |
2426 | BUG_ON(target == O2NM_MAX_NODES); |
2427 | ||
6714d8e8 KH |
2428 | name = res->lockname.name; |
2429 | namelen = res->lockname.len; | |
2430 | ||
66effd3c SM |
2431 | mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name, |
2432 | target); | |
6714d8e8 | 2433 | |
66effd3c | 2434 | /* preallocate up front. if this fails, abort */ |
6714d8e8 | 2435 | ret = -ENOMEM; |
ad8100e0 | 2436 | mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); |
6714d8e8 KH |
2437 | if (!mres) { |
2438 | mlog_errno(ret); | |
2439 | goto leave; | |
2440 | } | |
2441 | ||
3914ed0c | 2442 | mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); |
6714d8e8 KH |
2443 | if (!mle) { |
2444 | mlog_errno(ret); | |
2445 | goto leave; | |
2446 | } | |
2447 | ret = 0; | |
2448 | ||
6714d8e8 KH |
2449 | /* |
2450 | * clear any existing master requests and | |
2451 | * add the migration mle to the list | |
2452 | */ | |
66effd3c | 2453 | spin_lock(&dlm->spinlock); |
6714d8e8 KH |
2454 | spin_lock(&dlm->master_lock); |
2455 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, | |
2456 | namelen, target, dlm->node_num); | |
2457 | spin_unlock(&dlm->master_lock); | |
2458 | spin_unlock(&dlm->spinlock); | |
2459 | ||
2460 | if (ret == -EEXIST) { | |
2461 | mlog(0, "another process is already migrating it\n"); | |
2462 | goto fail; | |
2463 | } | |
2464 | mle_added = 1; | |
2465 | ||
2466 | /* | |
2467 | * set the MIGRATING flag and flush asts | |
2468 | * if we fail after this we need to re-dirty the lockres | |
2469 | */ | |
2470 | if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { | |
2471 | mlog(ML_ERROR, "tried to migrate %.*s to %u, but " | |
2472 | "the target went down.\n", res->lockname.len, | |
2473 | res->lockname.name, target); | |
2474 | spin_lock(&res->spinlock); | |
2475 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
a6fa3640 | 2476 | wake = 1; |
6714d8e8 KH |
2477 | spin_unlock(&res->spinlock); |
2478 | ret = -EINVAL; | |
2479 | } | |
2480 | ||
2481 | fail: | |
2482 | if (oldmle) { | |
2483 | /* master is known, detach if not already detached */ | |
2484 | dlm_mle_detach_hb_events(dlm, oldmle); | |
2485 | dlm_put_mle(oldmle); | |
2486 | } | |
2487 | ||
2488 | if (ret < 0) { | |
2489 | if (mle_added) { | |
2490 | dlm_mle_detach_hb_events(dlm, mle); | |
2491 | dlm_put_mle(mle); | |
2492 | } else if (mle) { | |
2493 | kmem_cache_free(dlm_mle_cache, mle); | |
66effd3c | 2494 | mle = NULL; |
6714d8e8 KH |
2495 | } |
2496 | goto leave; | |
2497 | } | |
2498 | ||
2499 | /* | |
2500 | * at this point, we have a migration target, an mle | |
2501 | * in the master list, and the MIGRATING flag set on | |
2502 | * the lockres | |
2503 | */ | |
2504 | ||
1cd04dbe KH |
2505 | /* now that remote nodes are spinning on the MIGRATING flag, |
2506 | * ensure that all assert_master work is flushed. */ | |
2507 | flush_workqueue(dlm->dlm_worker); | |
6714d8e8 KH |
2508 | |
2509 | /* get an extra reference on the mle. | |
2510 | * otherwise the assert_master from the new | |
2511 | * master will destroy this. | |
2512 | * also, make sure that all callers of dlm_get_mle | |
2513 | * take both dlm->spinlock and dlm->master_lock */ | |
2514 | spin_lock(&dlm->spinlock); | |
2515 | spin_lock(&dlm->master_lock); | |
a2bf0477 | 2516 | dlm_get_mle_inuse(mle); |
6714d8e8 KH |
2517 | spin_unlock(&dlm->master_lock); |
2518 | spin_unlock(&dlm->spinlock); | |
2519 | ||
2520 | /* notify new node and send all lock state */ | |
2521 | /* call send_one_lockres with migration flag. | |
2522 | * this serves as notice to the target node that a | |
2523 | * migration is starting. */ | |
2524 | ret = dlm_send_one_lockres(dlm, res, mres, target, | |
2525 | DLM_MRES_MIGRATION); | |
2526 | ||
2527 | if (ret < 0) { | |
2528 | mlog(0, "migration to node %u failed with %d\n", | |
2529 | target, ret); | |
2530 | /* migration failed, detach and clean up mle */ | |
2531 | dlm_mle_detach_hb_events(dlm, mle); | |
2532 | dlm_put_mle(mle); | |
a2bf0477 KH |
2533 | dlm_put_mle_inuse(mle); |
2534 | spin_lock(&res->spinlock); | |
2535 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
a6fa3640 | 2536 | wake = 1; |
a2bf0477 | 2537 | spin_unlock(&res->spinlock); |
df016c66 SM |
2538 | if (dlm_is_host_down(ret)) |
2539 | dlm_wait_for_node_death(dlm, target, | |
2540 | DLM_NODE_DEATH_WAIT_MAX); | |
6714d8e8 KH |
2541 | goto leave; |
2542 | } | |
2543 | ||
2544 | /* at this point, the target sends a message to all nodes, | |
2545 | * (using dlm_do_migrate_request). this node is skipped since | |
2546 | * we had to put an mle in the list to begin the process. this | |
2547 | * node now waits for target to do an assert master. this node | |
2548 | * will be the last one notified, ensuring that the migration | |
2549 | * is complete everywhere. if the target dies while this is | |
2550 | * going on, some nodes could potentially see the target as the | |
2551 | * master, so it is important that my recovery finds the migration | |
af901ca1 | 2552 | * mle and sets the master to UNKNOWN. */ |
6714d8e8 KH |
2553 | |
2554 | ||
2555 | /* wait for new node to assert master */ | |
2556 | while (1) { | |
2557 | ret = wait_event_interruptible_timeout(mle->wq, | |
2558 | (atomic_read(&mle->woken) == 1), | |
2559 | msecs_to_jiffies(5000)); | |
2560 | ||
2561 | if (ret >= 0) { | |
2562 | if (atomic_read(&mle->woken) == 1 || | |
2563 | res->owner == target) | |
2564 | break; | |
2565 | ||
1cd04dbe KH |
2566 | mlog(0, "%s:%.*s: timed out during migration\n", |
2567 | dlm->name, res->lockname.len, res->lockname.name); | |
2bd63216 | 2568 | /* avoid hang during shutdown when migrating lockres |
e2faea4c KH |
2569 | * to a node which also goes down */ |
2570 | if (dlm_is_node_dead(dlm, target)) { | |
aa852354 KH |
2571 | mlog(0, "%s:%.*s: expected migration " |
2572 | "target %u is no longer up, restarting\n", | |
e2faea4c KH |
2573 | dlm->name, res->lockname.len, |
2574 | res->lockname.name, target); | |
1cd04dbe KH |
2575 | ret = -EINVAL; |
2576 | /* migration failed, detach and clean up mle */ | |
2577 | dlm_mle_detach_hb_events(dlm, mle); | |
2578 | dlm_put_mle(mle); | |
2579 | dlm_put_mle_inuse(mle); | |
2580 | spin_lock(&res->spinlock); | |
2581 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
a6fa3640 | 2582 | wake = 1; |
1cd04dbe KH |
2583 | spin_unlock(&res->spinlock); |
2584 | goto leave; | |
e2faea4c | 2585 | } |
1cd04dbe KH |
2586 | } else |
2587 | mlog(0, "%s:%.*s: caught signal during migration\n", | |
2588 | dlm->name, res->lockname.len, res->lockname.name); | |
6714d8e8 KH |
2589 | } |
2590 | ||
2591 | /* all done, set the owner, clear the flag */ | |
2592 | spin_lock(&res->spinlock); | |
2593 | dlm_set_lockres_owner(dlm, res, target); | |
2594 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
2595 | dlm_remove_nonlocal_locks(dlm, res); | |
2596 | spin_unlock(&res->spinlock); | |
2597 | wake_up(&res->wq); | |
2598 | ||
2599 | /* master is known, detach if not already detached */ | |
2600 | dlm_mle_detach_hb_events(dlm, mle); | |
a2bf0477 | 2601 | dlm_put_mle_inuse(mle); |
6714d8e8 KH |
2602 | ret = 0; |
2603 | ||
2604 | dlm_lockres_calc_usage(dlm, res); | |
2605 | ||
2606 | leave: | |
2607 | /* re-dirty the lockres if we failed */ | |
2608 | if (ret < 0) | |
2609 | dlm_kick_thread(dlm, res); | |
2610 | ||
a6fa3640 KH |
2611 | /* wake up waiters if the MIGRATING flag got set |
2612 | * but migration failed */ | |
2613 | if (wake) | |
2614 | wake_up(&res->wq); | |
2615 | ||
6714d8e8 KH |
2616 | if (mres) |
2617 | free_page((unsigned long)mres); | |
2618 | ||
2619 | dlm_put(dlm); | |
2620 | ||
9f62e960 SM |
2621 | mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen, |
2622 | name, target, ret); | |
6714d8e8 KH |
2623 | return ret; |
2624 | } | |
6714d8e8 | 2625 | |
ba2bf218 KH |
2626 | #define DLM_MIGRATION_RETRY_MS 100 |
2627 | ||
9f62e960 SM |
2628 | /* |
2629 | * Should be called only after beginning the domain leave process. | |
ba2bf218 KH |
2630 | * There should not be any remaining locks on nonlocal lock resources, |
2631 | * and there should be no local locks left on locally mastered resources. | |
2632 | * | |
2633 | * Called with the dlm spinlock held, may drop it to do migration, but | |
2634 | * will re-acquire before exit. | |
2635 | * | |
9f62e960 SM |
2636 | * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped |
2637 | */ | |
ba2bf218 KH |
2638 | int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) |
2639 | { | |
66effd3c | 2640 | int ret; |
ba2bf218 | 2641 | int lock_dropped = 0; |
66effd3c | 2642 | u8 target = O2NM_MAX_NODES; |
ba2bf218 | 2643 | |
9f62e960 | 2644 | assert_spin_locked(&dlm->spinlock); |
2f5bf1f2 | 2645 | |
9f62e960 | 2646 | spin_lock(&res->spinlock); |
66effd3c SM |
2647 | if (dlm_is_lockres_migrateable(dlm, res)) |
2648 | target = dlm_pick_migration_target(dlm, res); | |
b36c3f84 | 2649 | spin_unlock(&res->spinlock); |
66effd3c SM |
2650 | |
2651 | if (target == O2NM_MAX_NODES) | |
9f62e960 | 2652 | goto leave; |
ba2bf218 KH |
2653 | |
2654 | /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ | |
2655 | spin_unlock(&dlm->spinlock); | |
2656 | lock_dropped = 1; | |
66effd3c SM |
2657 | ret = dlm_migrate_lockres(dlm, res, target); |
2658 | if (ret) | |
2659 | mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n", | |
2660 | dlm->name, res->lockname.len, res->lockname.name, | |
2661 | target, ret); | |
ba2bf218 KH |
2662 | spin_lock(&dlm->spinlock); |
2663 | leave: | |
2664 | return lock_dropped; | |
2665 | } | |
2666 | ||
6714d8e8 KH |
2667 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) |
2668 | { | |
2669 | int ret; | |
2670 | spin_lock(&dlm->ast_lock); | |
2671 | spin_lock(&lock->spinlock); | |
2672 | ret = (list_empty(&lock->bast_list) && !lock->bast_pending); | |
2673 | spin_unlock(&lock->spinlock); | |
2674 | spin_unlock(&dlm->ast_lock); | |
2675 | return ret; | |
2676 | } | |
2677 | ||
2678 | static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, | |
2679 | struct dlm_lock_resource *res, | |
2680 | u8 mig_target) | |
2681 | { | |
2682 | int can_proceed; | |
2683 | spin_lock(&res->spinlock); | |
2684 | can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); | |
2685 | spin_unlock(&res->spinlock); | |
2686 | ||
2bd63216 | 2687 | /* target has died, so make the caller break out of the |
6714d8e8 KH |
2688 | * wait_event, but caller must recheck the domain_map */ |
2689 | spin_lock(&dlm->spinlock); | |
2690 | if (!test_bit(mig_target, dlm->domain_map)) | |
2691 | can_proceed = 1; | |
2692 | spin_unlock(&dlm->spinlock); | |
2693 | return can_proceed; | |
2694 | } | |
2695 | ||
faf0ec9f AB |
2696 | static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, |
2697 | struct dlm_lock_resource *res) | |
6714d8e8 KH |
2698 | { |
2699 | int ret; | |
2700 | spin_lock(&res->spinlock); | |
2701 | ret = !!(res->state & DLM_LOCK_RES_DIRTY); | |
2702 | spin_unlock(&res->spinlock); | |
2703 | return ret; | |
2704 | } | |
2705 | ||
2706 | ||
2707 | static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, | |
2708 | struct dlm_lock_resource *res, | |
2709 | u8 target) | |
2710 | { | |
2711 | int ret = 0; | |
2712 | ||
2713 | mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", | |
2714 | res->lockname.len, res->lockname.name, dlm->node_num, | |
2715 | target); | |
2716 | /* need to set MIGRATING flag on lockres. this is done by | |
2717 | * ensuring that all asts have been flushed for this lockres. */ | |
2718 | spin_lock(&res->spinlock); | |
2719 | BUG_ON(res->migration_pending); | |
2720 | res->migration_pending = 1; | |
2721 | /* strategy is to reserve an extra ast then release | |
2722 | * it below, letting the release do all of the work */ | |
2723 | __dlm_lockres_reserve_ast(res); | |
2724 | spin_unlock(&res->spinlock); | |
2725 | ||
ddc09c8d | 2726 | /* now flush all the pending asts */ |
6714d8e8 | 2727 | dlm_kick_thread(dlm, res); |
ddc09c8d KH |
2728 | /* before waiting on DIRTY, block processes which may |
2729 | * try to dirty the lockres before MIGRATING is set */ | |
2730 | spin_lock(&res->spinlock); | |
2731 | BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); | |
2732 | res->state |= DLM_LOCK_RES_BLOCK_DIRTY; | |
2733 | spin_unlock(&res->spinlock); | |
2734 | /* now wait on any pending asts and the DIRTY state */ | |
6714d8e8 KH |
2735 | wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); |
2736 | dlm_lockres_release_ast(dlm, res); | |
2737 | ||
2738 | mlog(0, "about to wait on migration_wq, dirty=%s\n", | |
2739 | res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); | |
2740 | /* if the extra ref we just put was the final one, this | |
2741 | * will pass thru immediately. otherwise, we need to wait | |
2742 | * for the last ast to finish. */ | |
2743 | again: | |
2744 | ret = wait_event_interruptible_timeout(dlm->migration_wq, | |
2745 | dlm_migration_can_proceed(dlm, res, target), | |
2746 | msecs_to_jiffies(1000)); | |
2747 | if (ret < 0) { | |
2748 | mlog(0, "woken again: migrating? %s, dead? %s\n", | |
2749 | res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", | |
2750 | test_bit(target, dlm->domain_map) ? "no":"yes"); | |
2751 | } else { | |
2752 | mlog(0, "all is well: migrating? %s, dead? %s\n", | |
2753 | res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", | |
2754 | test_bit(target, dlm->domain_map) ? "no":"yes"); | |
2755 | } | |
2756 | if (!dlm_migration_can_proceed(dlm, res, target)) { | |
2757 | mlog(0, "trying again...\n"); | |
2758 | goto again; | |
2759 | } | |
2760 | ||
a39953dd | 2761 | ret = 0; |
6714d8e8 KH |
2762 | /* did the target go down or die? */ |
2763 | spin_lock(&dlm->spinlock); | |
2764 | if (!test_bit(target, dlm->domain_map)) { | |
2765 | mlog(ML_ERROR, "aha. migration target %u just went down\n", | |
2766 | target); | |
2767 | ret = -EHOSTDOWN; | |
2768 | } | |
2769 | spin_unlock(&dlm->spinlock); | |
2770 | ||
a39953dd WW |
2771 | /* |
2772 | * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for | |
2773 | * another try; otherwise, we are sure the MIGRATING state is there, | |
2774 |  * drop the unneeded state which blocked threads trying to DIRTY | |
2775 | */ | |
2776 | spin_lock(&res->spinlock); | |
2777 | BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); | |
2778 | res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; | |
2779 | if (!ret) | |
2780 | BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); | |
2781 | spin_unlock(&res->spinlock); | |
2782 | ||
6714d8e8 KH |
2783 | /* |
2784 | * at this point: | |
2785 | * | |
a39953dd | 2786 | * o the DLM_LOCK_RES_MIGRATING flag is set if target not down |
6714d8e8 KH |
2787 | * o there are no pending asts on this lockres |
2788 | * o all processes trying to reserve an ast on this | |
2789 | * lockres must wait for the MIGRATING flag to clear | |
2790 | */ | |
2791 | return ret; | |
2792 | } | |
2793 | ||
2794 | /* last step in the migration process. | |
2795 | * original master calls this to free all of the dlm_lock | |
2796 | * structures that used to be for other nodes. */ | |
2797 | static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | |
2798 | struct dlm_lock_resource *res) | |
2799 | { | |
6714d8e8 | 2800 | struct list_head *queue = &res->granted; |
ba2bf218 | 2801 | int i, bit; |
800deef3 | 2802 | struct dlm_lock *lock, *next; |
6714d8e8 KH |
2803 | |
2804 | assert_spin_locked(&res->spinlock); | |
2805 | ||
2806 | BUG_ON(res->owner == dlm->node_num); | |
2807 | ||
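/*
 * Walk all three lock queues; queue starts at res->granted and is
 * advanced at the bottom of the loop, which assumes the granted,
 * converting and blocked lists sit consecutively in the lockres.
 */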
2808 | for (i=0; i<3; i++) { | |
800deef3 | 2809 | list_for_each_entry_safe(lock, next, queue, list) { |
6714d8e8 KH |
2810 | if (lock->ml.node != dlm->node_num) { |
2811 | mlog(0, "putting lock for node %u\n", | |
2812 | lock->ml.node); | |
2813 | /* be extra careful */ | |
2814 | BUG_ON(!list_empty(&lock->ast_list)); | |
2815 | BUG_ON(!list_empty(&lock->bast_list)); | |
2816 | BUG_ON(lock->ast_pending); | |
2817 | BUG_ON(lock->bast_pending); | |
8d400b81 SM |
2818 | dlm_lockres_clear_refmap_bit(dlm, res, |
2819 | lock->ml.node); | |
6714d8e8 KH |
2820 | list_del_init(&lock->list); |
2821 | dlm_lock_put(lock); | |
2c5c54ac SM |
2822 | /* In a normal unlock, we would have added a |
2823 | * DLM_UNLOCK_FREE_LOCK action. Force it. */ | |
2824 | dlm_lock_put(lock); | |
6714d8e8 KH |
2825 | } |
2826 | } | |
2827 | queue++; | |
2828 | } | |
ba2bf218 KH |
2829 | bit = 0; |
2830 | while (1) { | |
2831 | bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); | |
2832 | if (bit >= O2NM_MAX_NODES) | |
2833 | break; | |
2834 | /* do not clear the local node reference, if there is a | |
2835 | * process holding this, let it drop the ref itself */ | |
2836 | if (bit != dlm->node_num) { | |
2837 | mlog(0, "%s:%.*s: node %u had a ref to this " | |
2838 | "migrating lockres, clearing\n", dlm->name, | |
2839 | res->lockname.len, res->lockname.name, bit); | |
8d400b81 | 2840 | dlm_lockres_clear_refmap_bit(dlm, res, bit); |
ba2bf218 KH |
2841 | } |
2842 | bit++; | |
2843 | } | |
6714d8e8 KH |
2844 | } |
2845 | ||
66effd3c SM |
2846 | /* |
2847 | * Pick a node to migrate the lock resource to. This function selects a | |
2848 | * potential target based first on the locks and then on refmap. It skips | |
2849 | * nodes that are in the process of exiting the domain. | |
2850 | */ | |
6714d8e8 KH |
2851 | static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, |
2852 | struct dlm_lock_resource *res) | |
2853 | { | |
66effd3c | 2854 | enum dlm_lockres_list idx; |
6714d8e8 | 2855 | struct list_head *queue = &res->granted; |
6714d8e8 | 2856 | struct dlm_lock *lock; |
66effd3c SM |
2857 | int noderef; |
2858 | u8 nodenum = O2NM_MAX_NODES; | |
6714d8e8 KH |
2859 | |
2860 | assert_spin_locked(&dlm->spinlock); | |
66effd3c | 2861 | assert_spin_locked(&res->spinlock); |
6714d8e8 | 2862 | |
66effd3c SM |
2863 | /* Go through all the locks */ |
2864 | for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { | |
2865 | queue = dlm_list_idx_to_ptr(res, idx); | |
800deef3 | 2866 | list_for_each_entry(lock, queue, list) { |
66effd3c SM |
2867 | if (lock->ml.node == dlm->node_num) |
2868 | continue; | |
2869 | if (test_bit(lock->ml.node, dlm->exit_domain_map)) | |
2870 | continue; | |
2871 | nodenum = lock->ml.node; | |
2872 | goto bail; | |
6714d8e8 | 2873 | } |
6714d8e8 | 2874 | } |
388c4bcb | 2875 | |
66effd3c SM |
2876 | /* Go through the refmap */ | |
2877 | noderef = -1; | |
6714d8e8 | 2878 | while (1) { |
66effd3c SM |
2879 | noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, |
2880 | noderef + 1); | |
2881 | if (noderef >= O2NM_MAX_NODES) | |
6714d8e8 | 2882 | break; |
66effd3c SM |
2883 | if (noderef == dlm->node_num) |
2884 | continue; | |
2885 | if (test_bit(noderef, dlm->exit_domain_map)) | |
2886 | continue; | |
2887 | nodenum = noderef; | |
2888 | goto bail; | |
6714d8e8 KH |
2889 | } |
2890 | ||
66effd3c SM |
2891 | bail: |
2892 | return nodenum; | |
6714d8e8 KH |
2893 | } |
2894 | ||
6714d8e8 KH |
2895 | /* this is called by the new master once all lockres |
2896 | * data has been received */ | |
2897 | static int dlm_do_migrate_request(struct dlm_ctxt *dlm, | |
2898 | struct dlm_lock_resource *res, | |
2899 | u8 master, u8 new_master, | |
2900 | struct dlm_node_iter *iter) | |
2901 | { | |
2902 | struct dlm_migrate_request migrate; | |
2b832564 | 2903 | int ret, skip, status = 0; |
6714d8e8 KH |
2904 | int nodenum; |
2905 | ||
2906 | memset(&migrate, 0, sizeof(migrate)); | |
2907 | migrate.namelen = res->lockname.len; | |
2908 | memcpy(migrate.name, res->lockname.name, migrate.namelen); | |
2909 | migrate.new_master = new_master; | |
2910 | migrate.master = master; | |
2911 | ||
2912 | ret = 0; | |
2913 | ||
2914 | /* send message to all nodes, except the master and myself */ | |
2915 | while ((nodenum = dlm_node_iter_next(iter)) >= 0) { | |
2916 | if (nodenum == master || | |
2917 | nodenum == new_master) | |
2918 | continue; | |
2919 | ||
2b832564 SM |
2920 | /* We could race exit domain. If exited, skip. */ |
2921 | spin_lock(&dlm->spinlock); | |
2922 | skip = (!test_bit(nodenum, dlm->domain_map)); | |
2923 | spin_unlock(&dlm->spinlock); | |
2924 | if (skip) { | |
2925 | clear_bit(nodenum, iter->node_map); | |
2926 | continue; | |
2927 | } | |
2928 | ||
6714d8e8 KH |
2929 | ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, |
2930 | &migrate, sizeof(migrate), nodenum, | |
2931 | &status); | |
2b832564 | 2932 | if (ret < 0) { |
8decab3c SM |
2933 | mlog(ML_ERROR, "%s: res %.*s, Error %d send " |
2934 | "MIGRATE_REQUEST to node %u\n", dlm->name, | |
2935 | migrate.namelen, migrate.name, ret, nodenum); | |
2b832564 SM |
2936 | if (!dlm_is_host_down(ret)) { |
2937 | mlog(ML_ERROR, "unhandled error=%d!\n", ret); | |
2938 | BUG(); | |
2939 | } | |
2940 | clear_bit(nodenum, iter->node_map); | |
2941 | ret = 0; | |
2942 | } else if (status < 0) { | |
6714d8e8 KH |
2943 | mlog(0, "migrate request (node %u) returned %d!\n", |
2944 | nodenum, status); | |
2945 | ret = status; | |
ba2bf218 KH |
2946 | } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) { |
2947 | /* during the migration request we short-circuited | |
2948 | * the mastery of the lockres. make sure we have | |
2949 | * a mastery ref for nodenum */ | |
2950 | mlog(0, "%s:%.*s: need ref for node %u\n", | |
2951 | dlm->name, res->lockname.len, res->lockname.name, | |
2952 | nodenum); | |
2953 | spin_lock(&res->spinlock); | |
8d400b81 | 2954 | dlm_lockres_set_refmap_bit(dlm, res, nodenum); |
ba2bf218 | 2955 | spin_unlock(&res->spinlock); |
6714d8e8 KH |
2956 | } |
2957 | } | |
2958 | ||
2959 | if (ret < 0) | |
2960 | mlog_errno(ret); | |
2961 | ||
2962 | mlog(0, "returning ret=%d\n", ret); | |
2963 | return ret; | |
2964 | } | |
2965 | ||
2966 | ||
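/* Illustrative sketch, not part of the module: the loop above distinguishes
 * transport failures from handler results.  o2net_send_message() returns a
 * negative value when the node could not be reached, while *status carries
 * whatever the remote migrate request handler returned.  The helper name is
 * hypothetical. */
static inline int dlm_sketch_send_migrate(struct dlm_ctxt *dlm,
					  struct dlm_migrate_request *migrate,
					  int nodenum)
{
	int status = 0;
	int ret;

	ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
				 migrate, sizeof(*migrate), nodenum, &status);
	if (ret < 0)
		return ret;	/* transport error, e.g. node down */
	return status;		/* remote handler's return code */
}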
2967 | /* if there is an existing mle for this lockres, we now know who the master is | |
2968 | * (the one who sent us *this* message), so we can clear it up right away. | |
2969 | * since the process that put the mle on the list still has a reference to it, | |
2970 | * we can unhash it now, set the master and wake the process. as a result, | |
2971 | * we will have no mle in the list to start with. now we can add an mle for | |
2972 | * the migration and this should be the only one found for those scanning the | |
2973 | * list. */ | |
d74c9803 KH |
2974 | int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, |
2975 | void **ret_data) | |
6714d8e8 KH |
2976 | { |
2977 | struct dlm_ctxt *dlm = data; | |
2978 | struct dlm_lock_resource *res = NULL; | |
2979 | struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; | |
2980 | struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; | |
2981 | const char *name; | |
a3d33291 | 2982 | unsigned int namelen, hash; |
6714d8e8 KH |
2983 | int ret = 0; |
2984 | ||
2985 | if (!dlm_grab(dlm)) | |
2986 | return -EINVAL; | |
2987 | ||
2988 | name = migrate->name; | |
2989 | namelen = migrate->namelen; | |
a3d33291 | 2990 | hash = dlm_lockid_hash(name, namelen); |
6714d8e8 KH |
2991 | |
2992 | /* preallocate.. if this fails, abort */ | |
3914ed0c | 2993 | mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); |
6714d8e8 KH |
2994 | |
2995 | if (!mle) { | |
2996 | ret = -ENOMEM; | |
2997 | goto leave; | |
2998 | } | |
2999 | ||
3000 | /* check for pre-existing lock */ | |
3001 | spin_lock(&dlm->spinlock); | |
a3d33291 | 3002 | res = __dlm_lookup_lockres(dlm, name, namelen, hash); |
6714d8e8 KH |
3003 | if (res) { |
3004 | spin_lock(&res->spinlock); | |
3005 | if (res->state & DLM_LOCK_RES_RECOVERING) { | |
3006 | /* if all is working ok, this can only mean that we got | |
3007 | * a migrate request from a node that we now see as | |
3008 | * dead. what can we do here? drop it to the floor? */ | |
3009 | spin_unlock(&res->spinlock); | |
3010 | mlog(ML_ERROR, "Got a migrate request, but the " | |
3011 | "lockres is marked as recovering!"); | |
3012 | kmem_cache_free(dlm_mle_cache, mle); | |
3013 | ret = -EINVAL; /* need a better solution */ | |
3014 | goto unlock; | |
3015 | } | |
3016 | res->state |= DLM_LOCK_RES_MIGRATING; | |
3017 | spin_unlock(&res->spinlock); | |
3018 | } | |
3019 | ||
6d98c3cc | 3020 | spin_lock(&dlm->master_lock); |
6714d8e8 KH |
3021 | /* ignore status. only nonzero status would BUG. */ |
3022 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, | |
3023 | name, namelen, | |
3024 | migrate->new_master, | |
3025 | migrate->master); | |
3026 | ||
6714d8e8 | 3027 | spin_unlock(&dlm->master_lock); |
6d98c3cc | 3028 | unlock: |
6714d8e8 KH |
3029 | spin_unlock(&dlm->spinlock); |
3030 | ||
3031 | if (oldmle) { | |
3032 | /* master is known, detach if not already detached */ | |
3033 | dlm_mle_detach_hb_events(dlm, oldmle); | |
3034 | dlm_put_mle(oldmle); | |
3035 | } | |
3036 | ||
3037 | if (res) | |
3038 | dlm_lockres_put(res); | |
3039 | leave: | |
3040 | dlm_put(dlm); | |
3041 | return ret; | |
3042 | } | |
3043 | ||
3044 | /* must be holding dlm->spinlock and dlm->master_lock | |
3045 | * when adding a migration mle, we can clear any other mles | |
3046 | * in the master list because we know with certainty that | |
3047 | * the master is "master". so we remove any old mle from | |
3048 | * the list after setting its master field, and then add | |
3049 | * the new migration mle. this way we can hold to the rule | |
3050 | * of having only one mle for a given lock name at all times. */ | |
3051 | static int dlm_add_migration_mle(struct dlm_ctxt *dlm, | |
3052 | struct dlm_lock_resource *res, | |
3053 | struct dlm_master_list_entry *mle, | |
3054 | struct dlm_master_list_entry **oldmle, | |
3055 | const char *name, unsigned int namelen, | |
3056 | u8 new_master, u8 master) | |
3057 | { | |
3058 | int found; | |
3059 | int ret = 0; | |
3060 | ||
3061 | *oldmle = NULL; | |
3062 | ||
6714d8e8 KH |
3063 | assert_spin_locked(&dlm->spinlock); |
3064 | assert_spin_locked(&dlm->master_lock); | |
3065 | ||
3066 | /* caller is responsible for any ref taken here on oldmle */ | |
3067 | found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); | |
3068 | if (found) { | |
3069 | struct dlm_master_list_entry *tmp = *oldmle; | |
3070 | spin_lock(&tmp->spinlock); | |
3071 | if (tmp->type == DLM_MLE_MIGRATION) { | |
3072 | if (master == dlm->node_num) { | |
3073 | /* ah another process raced me to it */ | |
3074 | mlog(0, "tried to migrate %.*s, but some " | |
3075 | "process beat me to it\n", | |
3076 | namelen, name); | |
3077 | ret = -EEXIST; | |
3078 | } else { | |
3079 | /* bad. 2 NODES are trying to migrate! */ | |
3080 | mlog(ML_ERROR, "migration error mle: " | |
3081 | "master=%u new_master=%u // request: " | |
3082 | "master=%u new_master=%u // " | |
3083 | "lockres=%.*s\n", | |
3084 | tmp->master, tmp->new_master, | |
3085 | master, new_master, | |
3086 | namelen, name); | |
3087 | BUG(); | |
3088 | } | |
3089 | } else { | |
3090 | /* this is essentially what assert_master does */ | |
3091 | tmp->master = master; | |
3092 | atomic_set(&tmp->woken, 1); | |
3093 | wake_up(&tmp->wq); | |
1c084577 SM |
3094 | /* remove it so that only one mle will be found */ |
3095 | __dlm_unlink_mle(dlm, tmp); | |
ba2bf218 KH |
3096 | __dlm_mle_detach_hb_events(dlm, tmp); |
3097 | ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; | |
3098 | mlog(0, "%s:%.*s: master=%u, newmaster=%u, " | |
3099 | "telling master to get ref for cleared out mle " | |
3100 | "during migration\n", dlm->name, namelen, name, | |
3101 | master, new_master); | |
6714d8e8 KH |
3102 | } |
3103 | spin_unlock(&tmp->spinlock); | |
3104 | } | |
3105 | ||
3106 | /* now add a migration mle to the tail of the list */ | |
3107 | dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); | |
3108 | mle->new_master = new_master; | |
ba2bf218 KH |
3109 | /* the new master will be sending an assert master for this. |
3110 | * at that point we will get the refmap reference */ | |
6714d8e8 KH |
3111 | mle->master = master; |
3112 | /* do this for consistency with other mle types */ | |
3113 | set_bit(new_master, mle->maybe_map); | |
1c084577 | 3114 | __dlm_insert_mle(dlm, mle); |
6714d8e8 KH |
3115 | |
3116 | return ret; | |
3117 | } | |
3118 | ||
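/* Illustrative calling sketch, not part of the module: dlm_add_migration_mle()
 * asserts that both dlm->spinlock and dlm->master_lock are held, so callers
 * nest the locks in that order, exactly as the migrate request handler above
 * does, and deal with any displaced mle afterwards.  The helper name is
 * hypothetical. */
static inline int dlm_sketch_add_migration_mle(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res,
					       struct dlm_master_list_entry *mle,
					       const char *name,
					       unsigned int namelen,
					       u8 new_master, u8 master)
{
	struct dlm_master_list_entry *oldmle = NULL;
	int ret;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, namelen,
				    new_master, master);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}
	return ret;
}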
c2cd4a44 SM |
3119 | /* |
3120 | * Sets the owner of the lockres associated with the mle to UNKNOWN | |
3121 | */ | |
3122 | static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm, | |
3123 | struct dlm_master_list_entry *mle) | |
3124 | { | |
3125 | struct dlm_lock_resource *res; | |
c2cd4a44 SM |
3126 | |
3127 | /* Find the lockres associated with the mle and set its owner to UNKNOWN */ | |
7141514b SM |
3128 | res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, |
3129 | mle->mnamehash); | |
c2cd4a44 SM |
3130 | if (res) { |
3131 | spin_unlock(&dlm->master_lock); | |
3132 | ||
3133 | /* move lockres onto recovery list */ | |
3134 | spin_lock(&res->spinlock); | |
3135 | dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); | |
3136 | dlm_move_lockres_to_recovery_list(dlm, res); | |
3137 | spin_unlock(&res->spinlock); | |
3138 | dlm_lockres_put(res); | |
3139 | ||
3140 | /* about to get rid of mle, detach from heartbeat */ | |
3141 | __dlm_mle_detach_hb_events(dlm, mle); | |
3142 | ||
3143 | /* dump the mle */ | |
3144 | spin_lock(&dlm->master_lock); | |
3145 | __dlm_put_mle(mle); | |
3146 | spin_unlock(&dlm->master_lock); | |
3147 | } | |
3148 | ||
3149 | return res; | |
3150 | } | |
3151 | ||
3152 | static void dlm_clean_migration_mle(struct dlm_ctxt *dlm, | |
3153 | struct dlm_master_list_entry *mle) | |
3154 | { | |
3155 | __dlm_mle_detach_hb_events(dlm, mle); | |
3156 | ||
3157 | spin_lock(&mle->spinlock); | |
3158 | __dlm_unlink_mle(dlm, mle); | |
3159 | atomic_set(&mle->woken, 1); | |
3160 | spin_unlock(&mle->spinlock); | |
3161 | ||
3162 | wake_up(&mle->wq); | |
3163 | } | |
3164 | ||
3165 | static void dlm_clean_block_mle(struct dlm_ctxt *dlm, | |
3166 | struct dlm_master_list_entry *mle, u8 dead_node) | |
3167 | { | |
3168 | int bit; | |
3169 | ||
3170 | BUG_ON(mle->type != DLM_MLE_BLOCK); | |
3171 | ||
3172 | spin_lock(&mle->spinlock); | |
3173 | bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); | |
3174 | if (bit != dead_node) { | |
3175 | mlog(0, "mle found, but dead node %u would not have been " | |
3176 | "master\n", dead_node); | |
3177 | spin_unlock(&mle->spinlock); | |
3178 | } else { | |
3179 | /* Must drop the refcount by one since the assert_master will | |
3180 | * never arrive. This may result in the mle being unlinked and | |
3181 | * freed, but there may still be a process waiting in the | |
3182 | * dlmlock path which is fine. */ | |
3183 | mlog(0, "node %u was expected master\n", dead_node); | |
3184 | atomic_set(&mle->woken, 1); | |
3185 | spin_unlock(&mle->spinlock); | |
3186 | wake_up(&mle->wq); | |
3187 | ||
3188 | /* Do not need events any longer, so detach from heartbeat */ | |
3189 | __dlm_mle_detach_hb_events(dlm, mle); | |
3190 | __dlm_put_mle(mle); | |
3191 | } | |
3192 | } | |
6714d8e8 KH |
3193 | |
3194 | void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) | |
3195 | { | |
2ed6c750 | 3196 | struct dlm_master_list_entry *mle; |
6714d8e8 | 3197 | struct dlm_lock_resource *res; |
2ed6c750 SM |
3198 | struct hlist_head *bucket; |
3199 | struct hlist_node *list; | |
3200 | unsigned int i; | |
6714d8e8 | 3201 | |
ef6b689b | 3202 | mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); |
6714d8e8 KH |
3203 | top: |
3204 | assert_spin_locked(&dlm->spinlock); | |
3205 | ||
3206 | /* clean the master list */ | |
3207 | spin_lock(&dlm->master_lock); | |
2ed6c750 SM |
3208 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
3209 | bucket = dlm_master_hash(dlm, i); | |
3210 | hlist_for_each(list, bucket) { | |
3211 | mle = hlist_entry(list, struct dlm_master_list_entry, | |
3212 | master_hash_node); | |
3213 | ||
67ae1f06 SM |
3214 | BUG_ON(mle->type != DLM_MLE_BLOCK && |
3215 | mle->type != DLM_MLE_MASTER && | |
3216 | mle->type != DLM_MLE_MIGRATION); | |
3217 | ||
3218 | /* MASTER mles are initiated locally. The waiting | |
3219 | * process will notice the node map change shortly. | |
3220 | * Let that happen as normal. */ | |
3221 | if (mle->type == DLM_MLE_MASTER) | |
3222 | continue; | |
3223 | ||
3224 | /* BLOCK mles are initiated by other nodes. Need to | |
3225 | * clean up if the dead node would have been the | |
3226 | * master. */ | |
3227 | if (mle->type == DLM_MLE_BLOCK) { | |
3228 | dlm_clean_block_mle(dlm, mle, dead_node); | |
3229 | continue; | |
3230 | } | |
6714d8e8 | 3231 | |
67ae1f06 SM |
3232 | /* Everything else is a MIGRATION mle */ |
3233 | ||
3234 | /* The rule for MIGRATION mles is that the master | |
3235 | * becomes UNKNOWN if *either* the original or the new | |
3236 | * master dies. All UNKNOWN lockres' are sent to | |
3237 | * whichever node becomes the recovery master. The new | |
3238 | * master is responsible for determining if there is | |
3239 | * still a master for this lockres, or if he needs to | |
3240 | * take over mastery. Either way, this node should | |
3241 | * expect another message to resolve this. */ | |
3242 | ||
3243 | if (mle->master != dead_node && | |
3244 | mle->new_master != dead_node) | |
3245 | continue; | |
3246 | ||
3247 | /* If we have reached this point, this mle needs to be | |
3248 | * removed from the list and freed. */ | |
3249 | dlm_clean_migration_mle(dlm, mle); | |
3250 | ||
3251 | mlog(0, "%s: node %u died during migration from " | |
3252 | "%u to %u!\n", dlm->name, dead_node, mle->master, | |
3253 | mle->new_master); | |
3254 | ||
3255 | /* If we find a lockres associated with the mle, we've | |
3256 | * hit this rare case that messes up our lock ordering. | |
3257 | * If so, we need to drop the master lock so that we can | |
3258 | * take the lockres lock, meaning that we will have to | |
3259 | * restart from the head of the list. */ | |
3260 | res = dlm_reset_mleres_owner(dlm, mle); | |
3261 | if (res) | |
3262 | /* restart */ | |
3263 | goto top; | |
3264 | ||
3265 | /* This may be the last reference */ | |
3266 | __dlm_put_mle(mle); | |
6714d8e8 | 3267 | } |
2ed6c750 | 3268 | } |
6714d8e8 KH |
3269 | spin_unlock(&dlm->master_lock); |
3270 | } | |
3271 | ||
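/* Illustrative sketch, not part of the module: the scan above only tears a
 * MIGRATION mle down when the dead node was one of the two parties to the
 * migration; otherwise the mle is left alone and recovery sorts out the
 * owner.  The helper name is hypothetical and assumes dlm->master_lock is
 * held, as in the loop above. */
static inline int dlm_sketch_migration_mle_is_stale(struct dlm_master_list_entry *mle,
						    u8 dead_node)
{
	if (mle->type != DLM_MLE_MIGRATION)
		return 0;
	return mle->master == dead_node || mle->new_master == dead_node;
}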
6714d8e8 KH |
3272 | int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, |
3273 | u8 old_master) | |
3274 | { | |
3275 | struct dlm_node_iter iter; | |
3276 | int ret = 0; | |
3277 | ||
3278 | spin_lock(&dlm->spinlock); | |
3279 | dlm_node_iter_init(dlm->domain_map, &iter); | |
3280 | clear_bit(old_master, iter.node_map); | |
3281 | clear_bit(dlm->node_num, iter.node_map); | |
3282 | spin_unlock(&dlm->spinlock); | |
3283 | ||
ba2bf218 KH |
3284 | /* ownership of the lockres is changing. account for the |
3285 | * mastery reference here since old_master will briefly have | |
3286 | * a reference after the migration completes */ | |
3287 | spin_lock(&res->spinlock); | |
8d400b81 | 3288 | dlm_lockres_set_refmap_bit(dlm, res, old_master); |
ba2bf218 KH |
3289 | spin_unlock(&res->spinlock); |
3290 | ||
6714d8e8 KH |
3291 | mlog(0, "now time to do a migrate request to other nodes\n"); |
3292 | ret = dlm_do_migrate_request(dlm, res, old_master, | |
3293 | dlm->node_num, &iter); | |
3294 | if (ret < 0) { | |
3295 | mlog_errno(ret); | |
3296 | goto leave; | |
3297 | } | |
3298 | ||
3299 | mlog(0, "doing assert master of %.*s to all except the original node\n", | |
3300 | res->lockname.len, res->lockname.name); | |
3301 | /* this call now finishes out the nodemap | |
3302 | * even if one or more nodes die */ | |
ba2bf218 | 3303 | ret = dlm_do_assert_master(dlm, res, iter.node_map, |
6714d8e8 KH |
3304 | DLM_ASSERT_MASTER_FINISH_MIGRATION); |
3305 | if (ret < 0) { | |
3306 | /* no longer need to retry. all living nodes contacted. */ | |
3307 | mlog_errno(ret); | |
3308 | ret = 0; | |
3309 | } | |
3310 | ||
3311 | memset(iter.node_map, 0, sizeof(iter.node_map)); | |
3312 | set_bit(old_master, iter.node_map); | |
3313 | mlog(0, "doing assert master of %.*s back to %u\n", | |
3314 | res->lockname.len, res->lockname.name, old_master); | |
ba2bf218 | 3315 | ret = dlm_do_assert_master(dlm, res, iter.node_map, |
6714d8e8 KH |
3316 | DLM_ASSERT_MASTER_FINISH_MIGRATION); |
3317 | if (ret < 0) { | |
3318 | mlog(0, "assert master to original master failed " | |
3319 | "with %d.\n", ret); | |
3320 | /* the only nonzero status here would be because of | |
3321 | * a dead original node. we're done. */ | |
3322 | ret = 0; | |
3323 | } | |
3324 | ||
3325 | /* all done, set the owner, clear the flag */ | |
3326 | spin_lock(&res->spinlock); | |
3327 | dlm_set_lockres_owner(dlm, res, dlm->node_num); | |
3328 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
3329 | spin_unlock(&res->spinlock); | |
3330 | /* re-dirty it on the new master */ | |
3331 | dlm_kick_thread(dlm, res); | |
3332 | wake_up(&res->wq); | |
3333 | leave: | |
3334 | return ret; | |
3335 | } | |
3336 | ||
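/* Illustrative sketch, not part of the module: both dlm_do_migrate_request()
 * and dlm_finish_migration() drive their broadcasts with a dlm_node_iter
 * seeded from the domain map under dlm->spinlock; nodes to be skipped are
 * simply cleared from iter.node_map before iterating.  The helper name is
 * hypothetical. */
static inline void dlm_sketch_iter_other_nodes(struct dlm_ctxt *dlm, u8 skip)
{
	struct dlm_node_iter iter;
	int nodenum;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(skip, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* send a message to nodenum here */
	}
}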
3337 | /* | |
3338 | * LOCKRES AST REFCOUNT | |
3339 | * this is integral to migration | |
3340 | */ | |
3341 | ||
3342 | /* for future intent to call an ast, reserve one ahead of time. | |
3343 | * this should be called only after waiting on the lockres | |
3344 | * with dlm_wait_on_lockres, and while still holding the | |
3345 | * spinlock after the call. */ | |
3346 | void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) | |
3347 | { | |
3348 | assert_spin_locked(&res->spinlock); | |
3349 | if (res->state & DLM_LOCK_RES_MIGRATING) { | |
3350 | __dlm_print_one_lock_resource(res); | |
3351 | } | |
3352 | BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); | |
3353 | ||
3354 | atomic_inc(&res->asts_reserved); | |
3355 | } | |
3356 | ||
3357 | /* | |
3358 | * used to drop the reserved ast, either because it went unused, | |
3359 | * or because the ast/bast was actually called. | |
3360 | * | |
3361 | * also, if there is a pending migration on this lockres, | |
3362 | * and this was the last pending ast on the lockres, | |
3363 | * atomically set the MIGRATING flag before we drop the lock. | |
3364 | * this is how we ensure that migration can proceed with no | |
3365 | * asts in progress. note that it is ok if the state of the | |
3366 | * queues is such that a lock should be granted in the future | |
3367 | * or that a bast should be fired, because the new master will | |
3368 | * shuffle the lists on this lockres as soon as it is migrated. | |
3369 | */ | |
3370 | void dlm_lockres_release_ast(struct dlm_ctxt *dlm, | |
3371 | struct dlm_lock_resource *res) | |
3372 | { | |
3373 | if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) | |
3374 | return; | |
3375 | ||
3376 | if (!res->migration_pending) { | |
3377 | spin_unlock(&res->spinlock); | |
3378 | return; | |
3379 | } | |
3380 | ||
3381 | BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); | |
3382 | res->migration_pending = 0; | |
3383 | res->state |= DLM_LOCK_RES_MIGRATING; | |
3384 | spin_unlock(&res->spinlock); | |
3385 | wake_up(&res->wq); | |
3386 | wake_up(&dlm->migration_wq); | |
3387 | } | |
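/* Illustrative sketch, not part of the module: every reserved ast must be
 * paired with a release, whether or not the ast actually fired.  Real
 * callers reserve only after dlm_wait_on_lockres(), while still holding
 * res->spinlock, as the comment above __dlm_lockres_reserve_ast() notes.
 * The helper name is hypothetical. */
static inline void dlm_sketch_reserved_ast(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* ... queue or deliver the ast/bast here ... */

	/* may atomically set DLM_LOCK_RES_MIGRATING if this was the last
	 * outstanding reservation and a migration is pending */
	dlm_lockres_release_ast(dlm, res);
}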
5dad6c39 SE |
3388 | |
3389 | void dlm_force_free_mles(struct dlm_ctxt *dlm) | |
3390 | { | |
3391 | int i; | |
3392 | struct hlist_head *bucket; | |
3393 | struct dlm_master_list_entry *mle; | |
3394 | struct hlist_node *tmp, *list; | |
3395 | ||
3396 | /* | |
3397 | * We notified all other nodes that we are exiting the domain and | |
3398 | * set the dlm state to DLM_CTXT_LEAVING. If any mles are still | |
3399 | * around, we force free them and wake any processes that are waiting | |
3400 | * on the mles. | |
3401 | */ | |
3402 | spin_lock(&dlm->spinlock); | |
3403 | spin_lock(&dlm->master_lock); | |
3404 | ||
3405 | BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); | |
3406 | BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES)); | |
3407 | ||
3408 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | |
3409 | bucket = dlm_master_hash(dlm, i); | |
3410 | hlist_for_each_safe(list, tmp, bucket) { | |
3411 | mle = hlist_entry(list, struct dlm_master_list_entry, | |
3412 | master_hash_node); | |
3413 | if (mle->type != DLM_MLE_BLOCK) { | |
3414 | mlog(ML_ERROR, "bad mle: %p\n", mle); | |
3415 | dlm_print_one_mle(mle); | |
3416 | } | |
3417 | atomic_set(&mle->woken, 1); | |
3418 | wake_up(&mle->wq); | |
3419 | ||
3420 | __dlm_unlink_mle(dlm, mle); | |
3421 | __dlm_mle_detach_hb_events(dlm, mle); | |
3422 | __dlm_put_mle(mle); | |
3423 | } | |
3424 | } | |
3425 | spin_unlock(&dlm->master_lock); | |
3426 | spin_unlock(&dlm->spinlock); | |
3427 | } |
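/* Illustrative sketch, not part of the module: dlm_force_free_mles() walks
 * each bucket with hlist_for_each_safe() because __dlm_unlink_mle() removes
 * the current entry; the "tmp" cursor keeps the iteration valid after each
 * removal.  The helper name is hypothetical and assumes dlm->master_lock is
 * held. */
static inline void dlm_sketch_drain_bucket(struct dlm_ctxt *dlm,
					   struct hlist_head *bucket)
{
	struct dlm_master_list_entry *mle;
	struct hlist_node *list, *tmp;

	hlist_for_each_safe(list, tmp, bucket) {
		mle = hlist_entry(list, struct dlm_master_list_entry,
				  master_hash_node);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_unlink_mle(dlm, mle);	/* safe: tmp was saved first */
		__dlm_put_mle(mle);
	}
}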