GFS2: Add more detail to debugfs glock dumps
fs/gfs2/glock.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct gfs2_glock_iter {
        int hash;                       /* hash bucket index         */
        struct gfs2_sbd *sdp;           /* incore superblock         */
        struct gfs2_glock *gl;          /* current glock struct      */
        char string[512];               /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif

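/*
 * Illustrative note (not from the original source): with the sizes
 * above, several hash chains share one lock. If GL_HASH_LOCK_SZ is
 * 256, then gl_lock_addr(0), gl_lock_addr(256), gl_lock_addr(512),
 * ... all return &gl_hash_locks[0]. The chains stay independent;
 * only the rwlocks protecting them are shared.
 */
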
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The glock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        if (sdp->sd_lockstruct.ls_ops->lm_put_lock)
                sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket number
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

/**
 * may_grant - check whether it's OK to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's OK to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
        const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
        if ((gh->gh_state == LM_ST_EXCLUSIVE ||
             gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
                return 0;
        if (gl->gl_state == gh->gh_state)
                return 1;
        if (gh->gh_flags & GL_EXACT)
                return 0;
        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
                        return 1;
                if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
                        return 1;
        }
        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
                return 1;
        return 0;
}

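/*
 * Worked example of the rules in may_grant() above (illustrative
 * note, not original source text): if the head of the queue holds
 * the glock in EX, any request from a different holder is refused
 * until the EX holder is done. If gl_state already equals the
 * requested state, the request is granted at once. GL_EXACT demands
 * an exact state match, while LM_FLAG_ANY is satisfied by any state
 * other than unlocked.
 */
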
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: true if there is a blocked holder at the head of the list
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh, *tmp;
        int ret;

restart:
        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (may_grant(gl, gh)) {
                        if (gh->gh_list.prev == &gl->gl_holders &&
                            glops->go_lock) {
                                spin_unlock(&gl->gl_spin);
                                /* FIXME: eliminate this eventually */
                                ret = glops->go_lock(gh);
                                spin_lock(&gl->gl_spin);
                                if (ret) {
                                        gh->gh_error = ret;
                                        list_del_init(&gh->gh_list);
                                        gfs2_holder_wake(gh);
                                        goto restart;
                                }
                                set_bit(HIF_HOLDER, &gh->gh_iflags);
                                gfs2_holder_wake(gh);
                                goto restart;
                        }
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        gfs2_holder_wake(gh);
                        continue;
                }
                if (gh->gh_list.prev == &gl->gl_holders)
                        return 1;
                break;
        }
        return 0;
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the lock module, or zero for a local failure
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
        struct gfs2_holder *gh, *tmp;

        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (ret & LM_OUT_ERROR)
                        gh->gh_error = -EIO;
                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        gh->gh_error = GLR_TRYFAILED;
                else
                        continue;
                list_del_init(&gh->gh_list);
                gfs2_holder_wake(gh);
        }
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;

        spin_lock(&gl->gl_spin);
        state_change(gl, state);
        gh = find_first_waiter(gl);

        /* Demote to UN request arrived during demote to SH or DF */
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
            state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
                gl->gl_target = LM_ST_UNLOCKED;

        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
                                if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
                                        list_move_tail(&gh->gh_list, &gl->gl_holders);
                                gh = find_first_waiter(gl);
                                gl->gl_target = gh->gh_state;
                                goto retry;
                        }
                        /* Some error or failed "try lock" - report it */
                        if ((ret & LM_OUT_ERROR) ||
                            (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
                                gl->gl_target = gl->gl_state;
                                do_error(gl, ret);
                                goto out;
                        }
                }
                switch(state) {
                /* Unlocked due to conversion deadlock, try again */
                case LM_ST_UNLOCKED:
retry:
                        do_xmote(gl, gh, gl->gl_target);
                        break;
                /* Conversion fails, unlock and try again */
                case LM_ST_SHARED:
                case LM_ST_DEFERRED:
                        do_xmote(gl, gh, LM_ST_UNLOCKED);
                        break;
                default: /* Everything else */
                        printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
                return;
        }

        /* Fast path - we got what we asked for */
        if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                gfs2_demote_wake(gl);
        if (state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
                        int rv;
                        spin_unlock(&gl->gl_spin);
                        rv = glops->go_xmote_bh(gl, gh);
                        if (rv == -EAGAIN)
                                return;
                        spin_lock(&gl->gl_spin);
                        if (rv) {
                                do_error(gl, rv);
                                goto out;
                        }
                }
                do_promote(gl);
        }
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}

static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
                                 unsigned int cur_state, unsigned int req_state,
                                 unsigned int flags)
{
        int ret = LM_OUT_ERROR;

        if (!sdp->sd_lockstruct.ls_ops->lm_lock)
                return req_state == LM_ST_UNLOCKED ? 0 : req_state;

        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
                                                         req_state, flags);
        return ret;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int lck_flags = gh ? gh->gh_flags : 0;
        int ret;

        lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
                      LM_FLAG_PRIORITY);
        BUG_ON(gl->gl_state == target);
        BUG_ON(gl->gl_state == gl->gl_target);
        if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
            glops->go_inval) {
                set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                do_error(gl, 0); /* Fail queued try locks */
        }
        spin_unlock(&gl->gl_spin);
        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

        gfs2_glock_hold(gl);
        if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
            gl->gl_state == LM_ST_DEFERRED) &&
            !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                lck_flags |= LM_FLAG_TRY_1CB;
        ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);

        if (!(ret & LM_OUT_ASYNC)) {
                finish_xmote(gl, ret);
                gfs2_glock_hold(gl);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
        } else {
                GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
        }
        spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        if (!list_empty(&gl->gl_holders)) {
                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        struct gfs2_holder *gh = NULL;

        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                return;

        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_demote_state != gl->gl_state) {
                if (find_first_holder(gl))
                        goto out;
                if (nonblock)
                        goto out_sched;
                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
                gl->gl_target = gl->gl_demote_state;
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
                if (do_promote(gl) == 0)
                        goto out;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
        return;

out_sched:
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
}

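/*
 * Summary of the glock state machine (descriptive note): run_queue()
 * picks a target state and calls do_xmote(), which hands the request
 * to the lock module. A synchronous reply is fed straight into
 * finish_xmote(); an asynchronous one arrives via the LM_CB_ASYNC
 * callback, which sets GLF_REPLY_PENDING and queues glock_work_func()
 * below, which in turn calls finish_xmote() with gl_reply and then
 * re-runs the queue.
 */
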
static void glock_work_func(struct work_struct *work)
{
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                finish_xmote(gl, gl->gl_reply);
        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
                unsigned long holdtime, now = jiffies;
                holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
                if (time_before(now, holdtime))
                        delay = holdtime - now;
                set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
        }
        run_queue(gl, 0);
        spin_unlock(&gl->gl_spin);
        if (!delay ||
            queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

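/*
 * Note on reference counting in the code above: gfs2_glock_hold() is
 * called before gl_work is queued, and queue_delayed_work() returning
 * zero means the work was already pending, in which case the extra
 * reference is dropped immediately. Thus exactly one reference is
 * held per outstanding work item, and glock_work_func() drops it when
 * it completes without requeueing.
 */
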
static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
                     void **lockp)
{
        int error = -EIO;
        if (!sdp->sd_lockstruct.ls_ops->lm_get_lock)
                return 0;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
                                sdp->sd_lockstruct.ls_lockspace, name, lockp);
        return error;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_ops = glops;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}

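/*
 * Example usage (sketch, not part of the original file): look up or
 * create the glock covering an inode's disk address without actually
 * locking it:
 *
 *      struct gfs2_glock *gl;
 *      int error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops,
 *                                 CREATE, &gl);
 *      if (!error)
 *              gfs2_glock_put(gl);
 *
 * The reference obtained on success must be dropped with
 * gfs2_glock_put() when the caller is done with the glock.
 */
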
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: 1 if the request comes from another node
 * @delay: if nonzero, mark the demote as pending rather than immediate
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            int remote, unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object)
                        gfs2_glock_schedule_for_reclaim(gl);
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                        gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
}

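/*
 * Example of the demote-state merging above (illustrative): if a
 * remote node first asks for a demote to SH and, before that is
 * processed, a second request asks for DF, the two incompatible
 * requests are merged to LM_ST_UNLOCKED, which satisfies both.
 */
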
/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        wait_on_holder(gh);
        return gh->gh_error;
}

/**
 * gfs2_print_dbg - print to a seq_file, or to the console if @seq is NULL
 * @seq: the seq_file to print into (may be NULL)
 * @fmt: the printf-style format string
 */

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (seq) {
                struct gfs2_glock_iter *gi = seq->private;
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(seq, "%s", gi->string);
        } else {
                printk(KERN_ERR " ");
                vprintk(fmt, args);
        }
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct list_head *insert_pt = NULL;
        struct gfs2_holder *gh2;
        int try_lock = 0;

        BUG_ON(gh->gh_owner_pid == NULL);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        try_lock = 1;
                if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                        goto fail;
        }

        list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
                if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
                    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
                        goto trap_recursive;
                if (try_lock &&
                    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
                    !may_grant(gl, gh)) {
fail:
                        gh->gh_error = GLR_TRYFAILED;
                        gfs2_holder_wake(gh);
                        return;
                }
                if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
                        continue;
                if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
                        insert_pt = &gh2->gh_list;
        }
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
                        goto do_cancel;
                return;
        }
        list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
        if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
                spin_unlock(&gl->gl_spin);
                if (sdp->sd_lockstruct.ls_ops->lm_cancel)
                        sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
                spin_lock(&gl->gl_spin);
        }
        return;

trap_recursive:
        print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
        printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
        printk(KERN_ERR "lock type: %d req lock state : %d\n",
               gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
        print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
        printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
        printk(KERN_ERR "lock type: %d req lock state : %d\n",
               gh->gh_gl->gl_name.ln_type, gh->gh_state);
        __dump_glock(NULL, gl);
        BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC))
                error = gfs2_glock_wait(gh);

        return error;
}

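/*
 * Typical usage (sketch, assuming the gfs2_glock_nq_init() wrapper
 * from glock.h, which combines gfs2_holder_init() and this function):
 *
 *      struct gfs2_holder gh;
 *      int error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *      if (error)
 *              return error;
 *      ...access the data protected by the glock...
 *      gfs2_glock_dq_uninit(&gh);
 */
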
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

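/*
 * Example of the asynchronous pattern (sketch): queue the request
 * with GL_ASYNC, do other work, then poll and collect the result:
 *
 *      gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);             (returns without waiting)
 *      while (!gfs2_glock_poll(&gh))
 *              ...do something else useful...
 *      error = gfs2_glock_wait(&gh);
 */
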
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;
        int fast_path = 0;

        spin_lock(&gl->gl_spin);
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

        list_del_init(&gh->gh_list);
        if (find_first_holder(gl) == NULL) {
                if (glops->go_unlock) {
                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
                        spin_unlock(&gl->gl_spin);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_spin);
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                }
                gl->gl_stamp = jiffies;
                if (list_empty(&gl->gl_holders) &&
                    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
                        fast_path = 1;
        }
        spin_unlock(&gl->gl_spin);
        if (likely(fast_path))
                return;

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_holder structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, sorted into locking order
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

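/*
 * Example (sketch): taking two inode glocks in deadlock-free order,
 * as a rename-style operation might:
 *
 *      struct gfs2_holder ghs[2];
 *
 *      gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
 *      gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
 *      error = gfs2_glock_nq_m(2, ghs);
 *      if (!error) {
 *              ...
 *              gfs2_glock_dq_m(2, ghs);
 *      }
 *      gfs2_holder_uninit(ghs);
 *      gfs2_holder_uninit(ghs + 1);
 */
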
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks and uninitialize their holders
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
{
        int error = -EIO;
        if (!sdp->sd_lockstruct.ls_ops->lm_hold_lvb)
                return 0;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
        return error;
}

/**
 * gfs2_lvb_hold - attach an LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error)
                        return error;
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach an LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_glock_hold(gl);
        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                if (sdp->sd_lockstruct.ls_ops->lm_unhold_lvb)
                        sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }
        gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;
        if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;

        spin_lock(&gl->gl_spin);
        handle_callback(gl, state, 1, delay);
        spin_unlock(&gl->gl_spin);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                gl->gl_reply = async->lc_ret;
                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                wake_up(&sdp->sd_reclaim_wq);
        } else
                spin_unlock(&sdp->sd_reclaim_lock);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        int done_callback = 0;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        spin_lock(&gl->gl_spin);
        if (find_first_holder(gl) == NULL &&
            gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) {
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                done_callback = 1;
        }
        spin_unlock(&gl->gl_spin);
        if (!done_callback ||
            queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket number
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while(1) {
                if (!sdp || gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        has_entries = 1;
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        cond_resched();
        return has_entries;
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
                return;
        if (test_bit(GLF_LOCK, &gl->gl_flags))
                return;

        spin_lock(&gl->gl_spin);
        if (find_first_holder(gl) == NULL &&
            gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                gfs2_glock_schedule_for_reclaim(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        spin_lock(&gl->gl_spin);
        if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                        if (examine_bucket(clear_glock, sdp, x))
                                cont = 1;
                }

                if (!cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                down_write(&gfs2_umount_flush_sem);
                invalidate_inodes(sdp->sd_vfs);
                up_write(&gfs2_umount_flush_sem);
                msleep(10);
        }
}

static const char *state2str(unsigned state)
{
        switch(state) {
        case LM_ST_UNLOCKED:
                return "UN";
        case LM_ST_SHARED:
                return "SH";
        case LM_ST_DEFERRED:
                return "DF";
        case LM_ST_EXCLUSIVE:
                return "EX";
        }
        return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
        char *p = buf;
        if (flags & LM_FLAG_TRY)
                *p++ = 't';
        if (flags & LM_FLAG_TRY_1CB)
                *p++ = 'T';
        if (flags & LM_FLAG_NOEXP)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'A'; /* upper case, so it can't be confused with GL_ASYNC's 'a' */
        if (flags & LM_FLAG_PRIORITY)
                *p++ = 'p';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
                *p++ = 'E';
        if (flags & GL_NOCACHE)
                *p++ = 'c';
        if (test_bit(HIF_HOLDER, &iflags))
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
        if (test_bit(HIF_FIRST, &iflags))
                *p++ = 'F';
        *p = 0;
        return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
        struct task_struct *gh_owner = NULL;
        char buffer[KSYM_SYMBOL_LEN];
        char flags_buf[32];

        sprint_symbol(buffer, gh->gh_ip);
        if (gh->gh_owner_pid)
                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
        gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
                  state2str(gh->gh_state),
                  hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
                  gh->gh_error,
                  gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
                  gh_owner ? gh_owner->comm : "(ended)", buffer);
        return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
        char *p = buf;
        if (test_bit(GLF_LOCK, gflags))
                *p++ = 'l';
        if (test_bit(GLF_STICKY, gflags))
                *p++ = 's';
        if (test_bit(GLF_DEMOTE, gflags))
                *p++ = 'D';
        if (test_bit(GLF_PENDING_DEMOTE, gflags))
                *p++ = 'd';
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
                *p++ = 'p';
        if (test_bit(GLF_DIRTY, gflags))
                *p++ = 'y';
        if (test_bit(GLF_LFLUSH, gflags))
                *p++ = 'f';
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
                *p++ = 'i';
        if (test_bit(GLF_REPLY_PENDING, gflags))
                *p++ = 'r';
        *p = 0;
        return buf;
}

1654 /**
1655  * __dump_glock - print information about a glock
1656  * @seq: The seq_file struct
1657  * @gl: the glock
1658  *
1659  * The file format is as follows:
1660  * One line per object; capital letters indicate the type of object:
1661  * G = glock, I = inode, R = rgrp, H = holder. Glocks are not indented;
1662  * other objects are indented by a single space and follow the glock to
1663  * which they relate. Fields are indicated by lower case letters
1664  * followed by a colon and the field value, except for strings, which
1665  * are enclosed in [] so that it is possible to see whether they are,
1666  * for example, composed of spaces. The fields are: n = number (id of
1667  * the object), f = flags, t = type, s = state, r = refcount, e = error, p = pid.
1668  *
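 * For example, a shared inode glock with one granted holder might be
 * rendered as follows (all values here are hypothetical, constructed
 * from the format strings used below):
 *
 * G:  s:SH n:2/75320 f:ly t:SH d:UN/0 l:0 a:0 r:4
 *  H: s:SH f:H e:0 p:4466 [pathwalk] gfs2_inode_lookup+0x14e/0x260 [gfs2]
 *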
1669  * Returns: 0 on success, -ENOBUFS when we run out of space
1670  */
1671
1672 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1673 {
1674         const struct gfs2_glock_operations *glops = gl->gl_ops;
1675         unsigned long long dtime;
1676         const struct gfs2_holder *gh;
1677         char gflags_buf[32];
1678         int error = 0;
1679
1680         dtime = jiffies - gl->gl_demote_time;
1681         dtime *= 1000000/HZ; /* demote time in uSec */
1682         if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1683                 dtime = 0;
1684         gfs2_print_dbg(seq, "G:  s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n",
1685                   state2str(gl->gl_state),
1686                   gl->gl_name.ln_type,
1687                   (unsigned long long)gl->gl_name.ln_number,
1688                   gflags2str(gflags_buf, &gl->gl_flags),
1689                   state2str(gl->gl_target),
1690                   state2str(gl->gl_demote_state), dtime,
1691                   atomic_read(&gl->gl_lvb_count),
1692                   atomic_read(&gl->gl_ail_count),
1693                   atomic_read(&gl->gl_ref));
1694
1695         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1696                 error = dump_holder(seq, gh);
1697                 if (error)
1698                         goto out;
1699         }
1700         if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1701                 error = glops->go_dump(seq, gl);
1702 out:
1703         return error;
1704 }
1705
1706 static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1707 {
1708         int ret;
1709         spin_lock(&gl->gl_spin);
1710         ret = __dump_glock(seq, gl);
1711         spin_unlock(&gl->gl_spin);
1712         return ret;
1713 }
1714
1715 /**
1716  * gfs2_dump_lockstate - print out the current lockstate
1717  * @sdp: the filesystem
1718  *
1719  * Dumps the state of every glock belonging to @sdp to the console.
1721  *
1722  */
1723
1724 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1725 {
1726         struct gfs2_glock *gl;
1727         struct hlist_node *h;
1728         unsigned int x;
1729         int error = 0;
1730
1731         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1732
1733                 read_lock(gl_lock_addr(x));
1734
1735                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1736                         if (gl->gl_sbd != sdp)
1737                                 continue;
1738
1739                         error = dump_glock(NULL, gl);
1740                         if (error)
1741                                 break;
1742                 }
1743
1744                 read_unlock(gl_lock_addr(x));
1745
1746                 if (error)
1747                         break;
1748         }
1749
1751         return error;
1752 }
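/*
 * Note that dump_glock() is called with a NULL seq_file here, which
 * makes gfs2_print_dbg() write to the console rather than to a
 * seq_file buffer; this is the path taken by the stalled-unmount
 * warning earlier in this file.
 */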
1753
1754 /**
1755  * gfs2_scand - Look for cached glocks and inodes to toss from memory
1756  * @sdp: Pointer to GFS2 superblock
1757  *
1758  * One of these daemons runs, finding candidates to add to sd_reclaim_list.
1759  * See gfs2_glockd()
1760  */
1761
1762 static int gfs2_scand(void *data)
1763 {
1764         unsigned x;
1765         unsigned delay;
1766
1767         while (!kthread_should_stop()) {
1768                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1769                         examine_bucket(scan_glock, NULL, x);
1770                 if (freezing(current))
1771                         refrigerator();
1772                 delay = scand_secs;
1773                 if (delay < 1)
1774                         delay = 1;
1775                 schedule_timeout_interruptible(delay * HZ);
1776         }
1777
1778         return 0;
1779 }
1780
1783 int __init gfs2_glock_init(void)
1784 {
1785         unsigned i;
1786         for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1787                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
1788         }
1789 #ifdef GL_HASH_LOCK_SZ
1790         for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
1791                 rwlock_init(&gl_hash_locks[i]);
1792         }
1793 #endif
1794
1795         scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
1796         if (IS_ERR(scand_process))
1797                 return PTR_ERR(scand_process);
1798
1799         glock_workqueue = create_workqueue("glock_workqueue");
1800         if (!glock_workqueue) { /* create_workqueue() returns NULL on failure */
1801                 kthread_stop(scand_process);
1802                 return -ENOMEM;
1803         }
1804
1805         return 0;
1806 }
1807
1808 void gfs2_glock_exit(void)
1809 {
1810         destroy_workqueue(glock_workqueue);
1811         kthread_stop(scand_process);
1812 }
1813
1814 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
1815 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
1816
1817 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1818 {
1819         struct gfs2_glock *gl;
1820
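        /*
         * Advance gi->gl to the next glock: first try the next entry in
         * the current hash chain, otherwise walk forward through the
         * remaining buckets. A reference is taken on the new glock under
         * the bucket read lock before the old reference is dropped, and
         * glocks belonging to other superblocks are skipped via restart.
         * This relies on gl_list being the first member of struct
         * gfs2_glock, so that hlist_entry() applied to a NULL ->next
         * pointer is itself NULL.
         */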
1821 restart:
1822         read_lock(gl_lock_addr(gi->hash));
1823         gl = gi->gl;
1824         if (gl) {
1825                 gi->gl = hlist_entry(gl->gl_list.next,
1826                                      struct gfs2_glock, gl_list);
1827         } else {
1828                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1829                                      struct gfs2_glock, gl_list);
1830         }
1831         if (gi->gl)
1832                 gfs2_glock_hold(gi->gl);
1833         read_unlock(gl_lock_addr(gi->hash));
1834         if (gl)
1835                 gfs2_glock_put(gl);
1836         while (gi->gl == NULL) {
1837                 gi->hash++;
1838                 if (gi->hash >= GFS2_GL_HASH_SIZE)
1839                         return 1;
1840                 read_lock(gl_lock_addr(gi->hash));
1841                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1842                                      struct gfs2_glock, gl_list);
1843                 if (gi->gl)
1844                         gfs2_glock_hold(gi->gl);
1845                 read_unlock(gl_lock_addr(gi->hash));
1846         }
1847
1848         if (gi->sdp != gi->gl->gl_sbd)
1849                 goto restart;
1850
1851         return 0;
1852 }
1853
1854 static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
1855 {
1856         if (gi->gl)
1857                 gfs2_glock_put(gi->gl);
1858         gi->gl = NULL;
1859 }
1860
1861 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1862 {
1863         struct gfs2_glock_iter *gi = seq->private;
1864         loff_t n = *pos;
1865
1866         gi->hash = 0;
1867
1868         do {
1869                 if (gfs2_glock_iter_next(gi)) {
1870                         gfs2_glock_iter_free(gi);
1871                         return NULL;
1872                 }
1873         } while (n--);
1874
1875         return gi->gl;
1876 }
1877
1878 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1879                                  loff_t *pos)
1880 {
1881         struct gfs2_glock_iter *gi = seq->private;
1882
1883         (*pos)++;
1884
1885         if (gfs2_glock_iter_next(gi)) {
1886                 gfs2_glock_iter_free(gi);
1887                 return NULL;
1888         }
1889
1890         return gi->gl;
1891 }
1892
1893 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1894 {
1895         struct gfs2_glock_iter *gi = seq->private;
1896         gfs2_glock_iter_free(gi);
1897 }
1898
1899 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1900 {
1901         return dump_glock(seq, iter_ptr);
1902 }
1903
1904 static const struct seq_operations gfs2_glock_seq_ops = {
1905         .start = gfs2_glock_seq_start,
1906         .next  = gfs2_glock_seq_next,
1907         .stop  = gfs2_glock_seq_stop,
1908         .show  = gfs2_glock_seq_show,
1909 };
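/*
 * These operations bind the glock iterator to the seq_file interface:
 * ->start re-walks the hash table from bucket 0 until the iterator
 * reaches *pos, ->next advances one glock at a time, ->show prints one
 * glock together with its holders via dump_glock(), and ->stop drops
 * any reference still held by the iterator.
 */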
1910
1911 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
1912 {
1913         int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1914                                    sizeof(struct gfs2_glock_iter));
1915         if (ret == 0) {
1916                 struct seq_file *seq = file->private_data;
1917                 struct gfs2_glock_iter *gi = seq->private;
1918                 gi->sdp = inode->i_private;
1919         }
1920         return ret;
1921 }
1922
1923 static const struct file_operations gfs2_debug_fops = {
1924         .owner   = THIS_MODULE,
1925         .open    = gfs2_debugfs_open,
1926         .read    = seq_read,
1927         .llseek  = seq_lseek,
1928         .release = seq_release_private,
1929 };
1930
1931 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1932 {
1933         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1934         if (!sdp->debugfs_dir)
1935                 return -ENOMEM;
1936         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
1937                                                          S_IFREG | S_IRUGO,
1938                                                          sdp->debugfs_dir, sdp,
1939                                                          &gfs2_debug_fops);
1940         if (!sdp->debugfs_dentry_glocks) {
1941                 gfs2_delete_debugfs_file(sdp); /* avoid leaking debugfs_dir */
1942                 return -ENOMEM;
1943         }
1944         return 0;
1944 }
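/*
 * Once created, and with debugfs mounted (for example via
 * "mount -t debugfs none /sys/kernel/debug"), the dump is readable
 * from /sys/kernel/debug/gfs2/<table_name>/glocks.
 */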
1945
1946 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
1947 {
1948         if (sdp && sdp->debugfs_dir) {
1949                 if (sdp->debugfs_dentry_glocks) {
1950                         debugfs_remove(sdp->debugfs_dentry_glocks);
1951                         sdp->debugfs_dentry_glocks = NULL;
1952                 }
1953                 debugfs_remove(sdp->debugfs_dir);
1954                 sdp->debugfs_dir = NULL;
1955         }
1956 }
1957
1958 int gfs2_register_debugfs(void)
1959 {
1960         gfs2_root = debugfs_create_dir("gfs2", NULL);
1961         return gfs2_root ? 0 : -ENOMEM;
1962 }
1963
1964 void gfs2_unregister_debugfs(void)
1965 {
1966         debugfs_remove(gfs2_root);
1967         gfs2_root = NULL;
1968 }