rhashtable: Change rhashtable_walk_start to return void
fs/gfs2/glock.c [linux-block.git]
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

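/*
 * Freeing a glock is a two-stage affair: gfs2_glock_free() removes it
 * from the hash table and wakes any lookup waiting for it to disappear,
 * while the memory itself is only reclaimed after an RCU grace period
 * via gfs2_glock_dealloc().  The smp_mb() orders the table removal
 * against the waitqueue check in wake_up_glock().
 */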
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue. Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above. The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}

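/*
 * Holder compatibility: a glock is either unlocked (UN) or held in
 * shared (SH), deferred (DF) or exclusive (EX) mode.  may_grant() below
 * decides whether a request is compatible with the current state and
 * with the holder at the head of the queue: an EX request (or an EX
 * holder at the head) is only granted to the head itself, GL_EXACT
 * demands an exact state match, and LM_FLAG_ANY accepts any locked
 * state.
 */
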
/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			pr_err("wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

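/*
 * State changes are asynchronous: do_xmote() below issues the request
 * to the lock module (the DLM for lock_dlm, or completed inline for
 * lock_nolock), the reply arrives via gfs2_glock_complete(), and
 * finish_xmote() above then reconciles the granted state with
 * gl_target, retrying through do_xmote() on cancellation or conversion
 * deadlock.
 */
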
/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
	    target != LM_ST_UNLOCKED)
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		} else if (ret) {
			pr_err("lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
						   &sdp->sd_flags));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

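/*
 * run_queue() is the core of the glock state machine: with GLF_LOCK held
 * it either starts a pending demote (once no holder remains) or promotes
 * waiting holders, calling do_xmote() whenever the lock module has to be
 * asked to change state.
 */
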
/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	__gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}

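/*
 * Look a glock up by name, or insert "new" if no live glock with that
 * name exists.  If a matching glock is found but its lockref is already
 * dead, it cannot be reused; we sleep on the glock wait table until the
 * dying glock has been removed from the hash table, and then retry.
 */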
static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret = 0;

	gl = find_insert_glock(&name, NULL);
	if (gl) {
		*glp = gl;
		return 0;
	}
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (!tmp) {
		*glp = gl;
		goto out;
	}
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out_free;
	}
	*glp = tmp;

out_free:
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);

out:
	return ret;
}
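
/*
 * A typical glock/holder life cycle around the functions below looks
 * roughly like this (a sketch; error handling elided, and see
 * gfs2_glock_nq_num() further down for a complete in-tree example):
 *
 *	gfs2_glock_get(sdp, number, glops, CREATE, &gl);
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	gfs2_glock_nq(&gh);        (blocks unless GL_ASYNC is set)
 *	...                        (the protected object is now locked)
 *	gfs2_glock_dq(&gh);
 *	gfs2_holder_uninit(&gh);   (drops the holder's glock reference)
 *	gfs2_glock_put(gl);        (drops the reference from gfs2_glock_get)
 */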

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	pr_err("new: %pSR\n", (void *)gh->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}

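/*
 * Note on queue ordering: holders are normally appended to gl_holders,
 * so requests are granted in arrival order.  LM_FLAG_PRIORITY requests
 * are instead inserted ahead of the other waiters, and may cancel a
 * request that is already in flight (the do_cancel path above).
 */
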
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		__gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_lockref.lock);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_lockref.lock);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_lockref.lock);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
	    (glops->go_flags & GLOF_LRU))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	if (unlikely(!fast_path)) {
		gl->gl_lockref.count++;
		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
		    gl->gl_name.ln_type == LM_TYPE_INODE)
			delay = gl->gl_hold_time;
		__gfs2_glock_queue_work(gl, delay);
	}
	spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_lockref.lock);
	handle_callback(gl, state, delay, true);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

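/*
 * While the lockspace is being recovered, DFL_BLOCK_LOCKS is set and
 * most DLM replies must not be acted upon; gfs2_glock_complete() then
 * parks the reply by setting GLF_FROZEN instead of queueing work,
 * unless gfs2_should_freeze() below says otherwise.  gfs2_glock_thaw()
 * requeues the frozen replies once recovery is complete.
 */
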
/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_add(&gl->gl_lru, &lru_list);
			atomic_inc(&lru_count);
			continue;
		}
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		clear_bit(GLF_LRU, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		__gfs2_glock_queue_work(gl, 0);
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

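/*
 * Shrinker integration: gfs2_glock_shrink_count() reports the LRU size
 * via vfs_pressure_ratio() and gfs2_glock_shrink_scan() demotes glocks
 * through gfs2_scan_glock_lru().  Scanning is refused for allocations
 * that may not recurse into the filesystem (no __GFP_FS in the mask),
 * since disposing of glocks can involve disk access.
 */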
static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};

/**
 * glock_hash_walk - Call a function for each glock in the hash table
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the examiner can be called multiple times on the same
 * object. So the user must ensure that the function can cope with
 * that.
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&gl_hash_table, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
			if (gl->gl_name.ln_sbd == sdp &&
			    lockref_get_not_dead(&gl->gl_lockref))
				examiner(gl);

		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

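/*
 * Note the iteration protocol above: rhashtable_walk_start() no longer
 * returns an error (the rhashtable change this tree carries), and a
 * concurrent table resize makes rhashtable_walk_next() return
 * ERR_PTR(-EAGAIN), in which case the whole walk is restarted after a
 * cond_resched().  This is why an examiner may see the same glock more
 * than once.
 */
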
/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
		gfs2_glock_put(gl);
		return;
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	__gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_lockref.lock);
	gfs2_dump_glock(seq, gl);
	spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event_timeout(sdp->sd_glock_wait,
			   atomic_read(&sdp->sd_glock_disposal) == 0,
			   HZ * 600);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

1761/**
8eae1ca0 1762 * gfs2_dump_glock - print information about a glock
6802e340 1763 * @seq: The seq_file struct
b3b94faa 1764 * @gl: the glock
6802e340
SW
1765 *
1766 * The file format is as follows:
1767 * One line per object, capital letters are used to indicate objects
1768 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1769 * other objects are indented by a single space and follow the glock to
1770 * which they are related. Fields are indicated by lower case letters
1771 * followed by a colon and the field value, except for strings which are in
1772 * [] so that its possible to see if they are composed of spaces for
1773 * example. The field's are n = number (id of the object), f = flags,
1774 * t = type, s = state, r = refcount, e = error, p = pid.
b3b94faa 1775 *
b3b94faa
DT
1776 */

void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       (int)gl->gl_lockref.count, gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl);
}
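
/*
 * Illustrative (hypothetical) dump in the format described above: an
 * inode glock (type 2) held exclusively by a single process:
 *
 *   G:  s:EX n:2/6116 f:lyq t:EX d:UN/0 a:0 v:0 r:3 m:200
 *    H: s:EX f:H e:0 p:1234 [bash] gfs2_inode_lookup+0x.../0x...
 */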
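
/*
 * One line of DLM statistics per glock: the smoothed round trip times
 * (rtt, and rttb for blocking requests) with their variances, the
 * smoothed inter-request time (irt), and the DLM request and queue
 * counts.
 */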
static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
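
/*
 * Each seq_file position packs a row and a column into a single loff_t:
 * index (pos >> 3) selects the glock type, with row 0 being the per-cpu
 * header row, and subindex (pos & 0x07) selects the stat column. For
 * example, pos == 11 decodes to index 1, subindex 3.
 */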
static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;
	unsigned subindex = pos & 0x07;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

		if (index == 0)
			seq_printf(seq, " %15u", i);
		else
			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
				   lkstats[index - 1].stats[subindex]);
	}
	seq_putc(seq, '\n');
	return 0;
}

int __init gfs2_glock_init(void)
{
	int i, ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

	ret = register_shrinker(&glock_shrinker);
	if (ret) {
		destroy_workqueue(gfs2_delete_workqueue);
		destroy_workqueue(glock_workqueue);
		rhashtable_destroy(&gl_hash_table);
		return ret;
	}
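
	/* Hashed table of wait queues used for waiting on glocks by name */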
	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(glock_wait_table + i);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}
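
/*
 * If the rhashtable is resized while we walk it,
 * rhashtable_walk_next() returns ERR_PTR(-EAGAIN) and the walk restarts
 * from the beginning of the table, so we keep calling it until we get a
 * real object (or NULL at the end of the table).
 */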
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
		if (IS_ERR(gi->gl)) {
			if (PTR_ERR(gi->gl) == -EAGAIN)
				continue;
			gi->gl = NULL;
			return;
		}
		/* Skip entries for other sb and dead entries */
		if (gi->sdp == gi->gl->gl_name.ln_sbd &&
		    !__lockref_is_dead(&gi->gl->gl_lockref))
			return;
	}
}
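
/*
 * The rhashtable walk is entered afresh on every ->start and torn down
 * again in ->stop. rhashtable_walk_start_check() is the variant of
 * rhashtable_walk_start() that still returns an error (-EAGAIN when a
 * resize intervened), so the failure is not silently ignored here.
 */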
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	if (rhashtable_walk_start_check(&gi->hti) != 0)
		return NULL;

	do {
		gfs2_glock_iter_next(gi);
	} while (gi->gl && n--);

	gi->last_pos = *pos;

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi);

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;

	gi->gl = NULL;
	rhashtable_walk_stop(&gi->hti);
	rhashtable_walk_exit(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr);
	return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};
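
/*
 * Preferred seq_file buffer size for the dumps below: several pages (up
 * to PAGE_ALLOC_COSTLY_ORDER), capped at 64k, so that a typical read
 * does not have to restart with a bigger buffer.
 */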
#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{
	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
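		/*
		 * Best effort: if this allocation fails, seq_file falls
		 * back to its default single-page buffer.
		 */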
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		gi->gl = NULL;
	}
	return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	gi->gl = NULL;
	return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private; /* sdp */
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glocks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_sbstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	struct dentry *dent;

	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dir = dent;

	dent = debugfs_create_file("glocks",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glocks_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glocks = dent;

	dent = debugfs_create_file("glstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_glstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_glstats = dent;

	dent = debugfs_create_file("sbstats",
				   S_IFREG | S_IRUGO,
				   sdp->debugfs_dir, sdp,
				   &gfs2_sbstats_fops);
	if (IS_ERR_OR_NULL(dent))
		goto fail;
	sdp->debugfs_dentry_sbstats = dent;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return dent ? PTR_ERR(dent) : -ENOMEM;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	if (IS_ERR(gfs2_root))
		return PTR_ERR(gfs2_root);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}