fs/gfs2/glops.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
        fs_err(gl->gl_name.ln_sbd,
               "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
               "state 0x%lx\n",
               bh, (unsigned long long)bh->b_blocknr, bh->b_state,
               bh->b_page->mapping, bh->b_page->flags);
        fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
               gl->gl_name.ln_type, gl->gl_name.ln_number,
               gfs2_glock2aspace(gl));
        gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
                             unsigned int nr_revokes)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd, *tmp;
        struct buffer_head *bh;
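        /* Buffers in any of these states should already have been written
         * back; finding one here is an error, except when called from
         * fsync, where such buffers are expected and simply skipped. */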
        const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
                if (nr_revokes == 0)
                        break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
                                continue;
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
                nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
}

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_trans tr;

        memset(&tr, 0, sizeof(tr));
        INIT_LIST_HEAD(&tr.tr_buf);
        INIT_LIST_HEAD(&tr.tr_databuf);
        tr.tr_revokes = atomic_read(&gl->gl_ail_count);

        if (!tr.tr_revokes)
                return;

        /* A shortened, inline version of gfs2_trans_begin();
         * tr->alloced is not set since the transaction structure is
         * on the stack */
        tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
        tr.tr_ip = _RET_IP_;
        if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
                return;
        WARN_ON_ONCE(current->journal_info);
        current->journal_info = &tr;

        __gfs2_ail_flush(gl, false, tr.tr_revokes);

        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
        unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
        int ret;

        if (!revokes)
                return;

        while (revokes > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

        ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
        __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}
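
/*
 * Note on the revoke arithmetic above (added commentary, not original code):
 * revokes are journaled as an array of 64-bit block numbers.  The first log
 * block carries a struct gfs2_log_descriptor header and therefore holds
 * (sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64) entries;
 * each continuation block carries only a struct gfs2_meta_header and holds
 * slightly more.  The while loop rounds max_revokes up one whole log block
 * at a time until the budget covers everything on the AIL list.
 */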

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd;
        int error;

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        if (rgd)
                gfs2_rgrp_brelse(rgd);
        spin_unlock(&gl->gl_lockref.lock);

        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return;
        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
        filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
        error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
        mapping_set_error(mapping, error);
        gfs2_ail_empty_gl(gl);

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        if (rgd)
                gfs2_free_clones(rgd);
        spin_unlock(&gl->gl_lockref.lock);
}
173 /**
174  * rgrp_go_inval - invalidate the metadata for this glock
175  * @gl: the glock
176  * @flags:
177  *
178  * We never used LM_ST_DEFERRED with resource groups, so that we
179  * should always see the metadata flag set here.
180  *
181  */
182
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

        if (rgd)
                gfs2_rgrp_brelse(rgd);

        WARN_ON_ONCE(!(flags & DIO_METADATA));
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

        if (rgd)
                rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;

        spin_lock(&gl->gl_lockref.lock);
        ip = gl->gl_object;
        if (ip)
                set_bit(GIF_GLOP_PENDING, &ip->i_flags);
        spin_unlock(&gl->gl_lockref.lock);
        return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
        struct gfs2_rgrpd *rgd;

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        spin_unlock(&gl->gl_lockref.lock);

        return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
        if (!ip)
                return;

        clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
        wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}
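
/*
 * For reference (added commentary, not part of the original file): the
 * consumers of GIF_GLOP_PENDING live in the inode code, not here, and block
 * on the same bit that gfs2_clear_glop_pending() wakes, along the lines of:
 *
 *      wait_on_bit(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
 */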

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);
        int isreg = ip && S_ISREG(ip->i_inode.i_mode);
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error;

        if (isreg) {
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
                inode_dio_wait(&ip->i_inode);
        }
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                goto out;

        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
        filemap_fdatawrite(metamapping);
        if (isreg) {
                struct address_space *mapping = ip->i_inode.i_mapping;
                filemap_fdatawrite(mapping);
                error = filemap_fdatawait(mapping);
                mapping_set_error(mapping, error);
        }
        error = filemap_fdatawait(metamapping);
        mapping_set_error(metamapping, error);
        gfs2_ail_empty_gl(gl);
        /*
         * Writeback of the data mapping may cause the dirty flag to be set
         * so we have to clear it again here.
         */
        smp_mb__before_atomic();
        clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
        gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);

        gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

        if (flags & DIO_METADATA) {
                struct address_space *mapping = gfs2_glock2aspace(gl);
                truncate_inode_pages(mapping, 0);
                if (ip) {
                        set_bit(GIF_INVALID, &ip->i_flags);
                        forget_all_cached_acls(&ip->i_inode);
                        security_inode_invalidate_secctx(&ip->i_inode);
                        gfs2_dir_hash_inval(ip);
                }
        }

        if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
                gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
                gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
        }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);

        gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
                return 0;

        return 1;
}
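
/*
 * Commentary (added, not in the original file): the two objects special-cased
 * above are the journal index and resource group index inodes, which the
 * filesystem consults constantly, so their glocks are never demoted merely
 * for cache pressure; that avoids re-reading jindex/rindex after routine
 * demotions.
 */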

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
        const struct gfs2_dinode *str = buf;
        struct timespec atime;
        u16 height, depth;

        if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
                goto corrupt;
        ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
        ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
        ip->i_inode.i_rdev = 0;
        switch (ip->i_inode.i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
                                           be32_to_cpu(str->di_minor));
                break;
        }

        i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
        i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
        set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
        i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
        gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
        atime.tv_sec = be64_to_cpu(str->di_atime);
        atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
        if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
                ip->i_inode.i_atime = atime;
        ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
        ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
        ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
        ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

        ip->i_goal = be64_to_cpu(str->di_goal_meta);
        ip->i_generation = be64_to_cpu(str->di_generation);

        ip->i_diskflags = be32_to_cpu(str->di_flags);
        ip->i_eattr = be64_to_cpu(str->di_eattr);
        /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
        gfs2_set_inode_flags(&ip->i_inode);
        height = be16_to_cpu(str->di_height);
        if (unlikely(height > GFS2_MAX_META_HEIGHT))
                goto corrupt;
        ip->i_height = (u8)height;

        depth = be16_to_cpu(str->di_depth);
        if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
                goto corrupt;
        ip->i_depth = (u8)depth;
        ip->i_entries = be32_to_cpu(str->di_entries);

        if (S_ISREG(ip->i_inode.i_mode))
                gfs2_set_aops(&ip->i_inode);

        return 0;
corrupt:
        gfs2_consist_inode(ip);
        return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        error = gfs2_dinode_in(ip, dibh->b_data);
        brelse(dibh);
        clear_bit(GIF_INVALID, &ip->i_flags);

        return error;
}

/**
 * inode_go_lock - operation done after an inode lock is acquired by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;

        if (!ip || (gh->gh_flags & GL_SKIP))
                return 0;

        if (test_bit(GIF_INVALID, &ip->i_flags)) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        return error;
        }

        if (gh->gh_state != LM_ST_DEFERRED)
                inode_dio_wait(&ip->i_inode);

        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_state == LM_ST_EXCLUSIVE)) {
                spin_lock(&sdp->sd_trunc_lock);
                if (list_empty(&ip->i_trunc_list))
                        list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                wake_up(&sdp->sd_quota_wait);
                return 1;
        }

        return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
        const struct gfs2_inode *ip = gl->gl_object;
        if (ip == NULL)
                return;
        gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
                  (unsigned long long)ip->i_no_formal_ino,
                  (unsigned long long)ip->i_no_addr,
                  IF2DT(ip->i_inode.i_mode), ip->i_flags,
                  (unsigned int)ip->i_diskflags,
                  (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
        int error = 0;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (gl->gl_state == LM_ST_SHARED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
                        printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
                gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
        }
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_log_header_host head;
        int error;

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

                error = gfs2_find_jhead(sdp->sd_jdesc, &head);
                if (error)
                        gfs2_consist(sdp);
                if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
                        gfs2_consist(sdp);

                /* Initialize the head of the log */
                if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
                        sdp->sd_log_sequence = head.lh_sequence + 1;
                        gfs2_log_pointers_init(sdp, head.lh_blkno);
                }
        }
        return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
        return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (!remote || sb_rdonly(sdp->sd_vfs))
                return;

        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
                gl->gl_lockref.count++;
                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
                        gl->gl_lockref.count--;
        }
}
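
/*
 * Commentary (added, not in the original file): the extra glock reference
 * taken above is owned by the queued work item and is dropped when that work
 * finishes its eviction attempt; if queue_work() returns 0 the work was
 * already pending and owns a reference of its own, so the new one is dropped
 * again immediately.
 */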

const struct gfs2_glock_operations gfs2_meta_glops = {
        .go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
        .go_sync = inode_go_sync,
        .go_inval = inode_go_inval,
        .go_demote_ok = inode_go_demote_ok,
        .go_lock = inode_go_lock,
        .go_dump = inode_go_dump,
        .go_type = LM_TYPE_INODE,
        .go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
        .go_sync = rgrp_go_sync,
        .go_inval = rgrp_go_inval,
        .go_lock = gfs2_rgrp_go_lock,
        .go_unlock = gfs2_rgrp_go_unlock,
        .go_dump = gfs2_rgrp_dump,
        .go_type = LM_TYPE_RGRP,
        .go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
        .go_sync = freeze_go_sync,
        .go_xmote_bh = freeze_go_xmote_bh,
        .go_demote_ok = freeze_go_demote_ok,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_type = LM_TYPE_IOPEN,
        .go_callback = iopen_go_callback,
        .go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
        .go_type = LM_TYPE_FLOCK,
        .go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
        .go_type = LM_TYPE_QUOTA,
        .go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
        .go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
        [LM_TYPE_META] = &gfs2_meta_glops,
        [LM_TYPE_INODE] = &gfs2_inode_glops,
        [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
        [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
        [LM_TYPE_FLOCK] = &gfs2_flock_glops,
        [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
        [LM_TYPE_QUOTA] = &gfs2_quota_glops,
        [LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
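
/*
 * Usage sketch (illustrative commentary, not part of the original file):
 * gfs2_glops_list lets the operations be recovered from a lock name's type
 * field alone, e.g.:
 *
 *      const struct gfs2_glock_operations *glops =
 *              gfs2_glops_list[gl->gl_name.ln_type];
 *
 * whereas direct callers simply pass a table such as &gfs2_inode_glops to
 * gfs2_glock_get().
 */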