GNU Linux-libre 4.19.264-gnu1
fs/gfs2/glops.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/spinlock.h>
11 #include <linux/completion.h>
12 #include <linux/buffer_head.h>
13 #include <linux/gfs2_ondisk.h>
14 #include <linux/bio.h>
15 #include <linux/posix_acl.h>
16 #include <linux/security.h>
17
18 #include "gfs2.h"
19 #include "incore.h"
20 #include "bmap.h"
21 #include "glock.h"
22 #include "glops.h"
23 #include "inode.h"
24 #include "log.h"
25 #include "meta_io.h"
26 #include "recovery.h"
27 #include "rgrp.h"
28 #include "util.h"
29 #include "trans.h"
30 #include "dir.h"
31
32 struct workqueue_struct *gfs2_freeze_wq;
33
34 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
35 {
36         fs_err(gl->gl_name.ln_sbd,
37                "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
38                "state 0x%lx\n",
39                bh, (unsigned long long)bh->b_blocknr, bh->b_state,
40                bh->b_page->mapping, bh->b_page->flags);
41         fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
42                gl->gl_name.ln_type, gl->gl_name.ln_number,
43                gfs2_glock2aspace(gl));
44         gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
45 }
46
47 /**
48  * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
49  * @gl: the glock
50  * @fsync: set when called from fsync (not all buffers will be clean)
51  * @nr_revokes: the maximum number of revokes to add
52  * None of the buffers should be dirty, locked, or pinned.
53  */
54
55 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
56                              unsigned int nr_revokes)
57 {
58         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
59         struct list_head *head = &gl->gl_ail_list;
60         struct gfs2_bufdata *bd, *tmp;
61         struct buffer_head *bh;
62         const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
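        /*
         * Buffers still dirty, pinned or locked here cannot be turned into
         * revokes: fsync callers simply skip them (not all buffers will be
         * clean in that case), while for any other caller such a buffer is
         * unexpected and is reported as an AIL error.
         */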
63
64         gfs2_log_lock(sdp);
65         spin_lock(&sdp->sd_ail_lock);
66         list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
67                 if (nr_revokes == 0)
68                         break;
69                 bh = bd->bd_bh;
70                 if (bh->b_state & b_state) {
71                         if (fsync)
72                                 continue;
73                         gfs2_ail_error(gl, bh);
74                 }
75                 gfs2_trans_add_revoke(sdp, bd);
76                 nr_revokes--;
77         }
78         GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
79         spin_unlock(&sdp->sd_ail_lock);
80         gfs2_log_unlock(sdp);
81 }
82
83
84 static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
85 {
86         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
87         struct gfs2_trans tr;
88
89         memset(&tr, 0, sizeof(tr));
90         INIT_LIST_HEAD(&tr.tr_buf);
91         INIT_LIST_HEAD(&tr.tr_databuf);
92         INIT_LIST_HEAD(&tr.tr_ail1_list);
93         INIT_LIST_HEAD(&tr.tr_ail2_list);
94         tr.tr_revokes = atomic_read(&gl->gl_ail_count);
95
96         if (!tr.tr_revokes)
97                 return;
98
99         /* A shortened, inline version of gfs2_trans_begin();
100          * tr->alloced is not set since the transaction structure is
101          * on the stack */
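        /*
         * The reservation below is one block for the log header plus
         * however many blocks gfs2_struct2blk() says are needed to hold
         * tr_revokes 64-bit revoke entries.
         */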
102         tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
103         tr.tr_ip = _RET_IP_;
104         if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
105                 return;
106         WARN_ON_ONCE(current->journal_info);
107         current->journal_info = &tr;
108
109         __gfs2_ail_flush(gl, false, tr.tr_revokes);
110
111         gfs2_trans_end(sdp);
112         gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
113                        GFS2_LFC_AIL_EMPTY_GL);
114 }
115
116 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
117 {
118         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
119         unsigned int revokes = atomic_read(&gl->gl_ail_count);
120         unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
121         int ret;
122
123         if (!revokes)
124                 return;
125
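        /*
         * Grow max_revokes until it covers all pending revokes.  The first
         * revoke block starts with a log descriptor, each further block
         * only with a meta header, so with a 4KiB block size roughly 500
         * revokes fit per block.
         */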
126         while (revokes > max_revokes)
127                 max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
128
129         ret = gfs2_trans_begin(sdp, 0, max_revokes);
130         if (ret)
131                 return;
132         __gfs2_ail_flush(gl, fsync, max_revokes);
133         gfs2_trans_end(sdp);
134         gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
135                        GFS2_LFC_AIL_FLUSH);
136 }
137
138 /**
139  * rgrp_go_sync - sync out the metadata for this glock
140  * @gl: the glock
141  *
142  * Called when demoting or unlocking an EX glock.  We must flush
143  * to disk all dirty buffers/pages relating to this glock, and must not
144  * return to the caller to demote/unlock the glock until I/O is complete.
145  */
146
147 static void rgrp_go_sync(struct gfs2_glock *gl)
148 {
149         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
150         struct address_space *mapping = &sdp->sd_aspace;
151         struct gfs2_rgrpd *rgd;
152         int error;
153
154         spin_lock(&gl->gl_lockref.lock);
155         rgd = gl->gl_object;
156         if (rgd)
157                 gfs2_rgrp_brelse(rgd);
158         spin_unlock(&gl->gl_lockref.lock);
159
160         if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
161                 return;
162         GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
163
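        /*
         * Broadly: flush the journal for this glock, write back and wait
         * on the rgrp's metadata pages, then empty the AIL so the
         * now-written buffers are turned into revokes before the lock can
         * move to another node.
         */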
164         gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
165                        GFS2_LFC_RGRP_GO_SYNC);
166         filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
167         error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
168         mapping_set_error(mapping, error);
169         gfs2_ail_empty_gl(gl);
170
171         spin_lock(&gl->gl_lockref.lock);
172         rgd = gl->gl_object;
173         if (rgd)
174                 gfs2_free_clones(rgd);
175         spin_unlock(&gl->gl_lockref.lock);
176 }
177
178 /**
179  * rgrp_go_inval - invalidate the metadata for this glock
180  * @gl: the glock
181  * @flags: DIO_* invalidation flags
182  *
183  * We never use LM_ST_DEFERRED with resource groups, so we should
184  * always see the metadata flag set here.
185  *
186  */
187
188 static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
189 {
190         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
191         struct address_space *mapping = &sdp->sd_aspace;
192         struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
193
194         if (rgd)
195                 gfs2_rgrp_brelse(rgd);
196
197         WARN_ON_ONCE(!(flags & DIO_METADATA));
198         gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
199         truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
200
201         if (rgd)
202                 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
203 }
204
205 static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
206 {
207         struct gfs2_inode *ip;
208
209         spin_lock(&gl->gl_lockref.lock);
210         ip = gl->gl_object;
211         if (ip)
212                 set_bit(GIF_GLOP_PENDING, &ip->i_flags);
213         spin_unlock(&gl->gl_lockref.lock);
214         return ip;
215 }
216
217 struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
218 {
219         struct gfs2_rgrpd *rgd;
220
221         spin_lock(&gl->gl_lockref.lock);
222         rgd = gl->gl_object;
223         spin_unlock(&gl->gl_lockref.lock);
224
225         return rgd;
226 }
227
228 static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
229 {
230         if (!ip)
231                 return;
232
233         clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
234         wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
235 }
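/*
 * gfs2_glock2inode() and gfs2_clear_glop_pending() bracket glock operations
 * that dereference gl_object: GIF_GLOP_PENDING is set while such an
 * operation is in flight and cleared (with a wake-up) when it finishes, so
 * code tearing down the inode can wait on the bit before the inode goes
 * away.
 */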
236
237 /**
238  * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
239  * @gl: the glock protecting the inode
240  *
241  */
242
243 static void inode_go_sync(struct gfs2_glock *gl)
244 {
245         struct gfs2_inode *ip = gfs2_glock2inode(gl);
246         int isreg = ip && S_ISREG(ip->i_inode.i_mode);
247         struct address_space *metamapping = gfs2_glock2aspace(gl);
248         int error;
249
250         if (isreg) {
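                /*
                 * GIF_SW_PAGED means the inode was written through a shared
                 * mapping since the last sync; tear down those PTEs so
                 * later writes fault again (and re-take the glock via
                 * ->page_mkwrite) before dirtying more pages.
                 */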
251                 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
252                         unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
253                 inode_dio_wait(&ip->i_inode);
254         }
255         if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
256                 goto out;
257
258         GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
259
260         gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
261                        GFS2_LFC_INODE_GO_SYNC);
262         filemap_fdatawrite(metamapping);
263         if (isreg) {
264                 struct address_space *mapping = ip->i_inode.i_mapping;
265                 filemap_fdatawrite(mapping);
266                 error = filemap_fdatawait(mapping);
267                 mapping_set_error(mapping, error);
268         }
269         error = filemap_fdatawait(metamapping);
270         mapping_set_error(metamapping, error);
271         gfs2_ail_empty_gl(gl);
272         /*
273          * Writeback of the data mapping may cause the dirty flag to be set
274          * so we have to clear it again here.
275          */
276         smp_mb__before_atomic();
277         clear_bit(GLF_DIRTY, &gl->gl_flags);
278
279 out:
280         gfs2_clear_glop_pending(ip);
281 }
282
283 /**
284  * inode_go_inval - prepare an inode glock to be released
285  * @gl: the glock
286  * @flags: DIO_METADATA when the metadata mapping should also be invalidated
287  *
288  * Normally we invalidate everything, but if we are moving into
289  * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
290  * can keep hold of the metadata, since it won't have changed.
291  *
292  */
293
294 static void inode_go_inval(struct gfs2_glock *gl, int flags)
295 {
296         struct gfs2_inode *ip = gfs2_glock2inode(gl);
297
298         gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
299
300         if (flags & DIO_METADATA) {
301                 struct address_space *mapping = gfs2_glock2aspace(gl);
302                 truncate_inode_pages(mapping, 0);
303                 if (ip) {
304                         set_bit(GIF_INVALID, &ip->i_flags);
305                         forget_all_cached_acls(&ip->i_inode);
306                         security_inode_invalidate_secctx(&ip->i_inode);
307                         gfs2_dir_hash_inval(ip);
308                 }
309         }
310
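        /*
         * For the resource index inode, also flush the journal and mark
         * the in-core rindex stale so that it is re-read the next time
         * gfs2_rindex_update() needs it.
         */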
311         if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
312                 gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
313                                GFS2_LOG_HEAD_FLUSH_NORMAL |
314                                GFS2_LFC_INODE_GO_INVAL);
315                 gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
316         }
317         if (ip && S_ISREG(ip->i_inode.i_mode))
318                 truncate_inode_pages(ip->i_inode.i_mapping, 0);
319
320         gfs2_clear_glop_pending(ip);
321 }
322
323 /**
324  * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
325  * @gl: the glock
326  *
327  * Returns: 1 if it's ok
328  */
329
330 static int inode_go_demote_ok(const struct gfs2_glock *gl)
331 {
332         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
333
334         if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
335                 return 0;
336
337         return 1;
338 }
339
340 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
341 {
342         const struct gfs2_dinode *str = buf;
343         struct timespec64 atime;
344         u16 height, depth;
345
346         if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
347                 goto corrupt;
348         ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
349         ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
350         ip->i_inode.i_rdev = 0;
351         switch (ip->i_inode.i_mode & S_IFMT) {
352         case S_IFBLK:
353         case S_IFCHR:
354                 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
355                                            be32_to_cpu(str->di_minor));
356                 break;
357         }
358
359         i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
360         i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
361         set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
362         i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
363         gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
364         atime.tv_sec = be64_to_cpu(str->di_atime);
365         atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
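        /*
         * Keep the newer of the in-core and on-disk atime: the in-core
         * value may already be ahead of disk, since atime updates are
         * written back lazily.
         */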
366         if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
367                 ip->i_inode.i_atime = atime;
368         ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
369         ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
370         ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
371         ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
372
373         ip->i_goal = be64_to_cpu(str->di_goal_meta);
374         ip->i_generation = be64_to_cpu(str->di_generation);
375
376         ip->i_diskflags = be32_to_cpu(str->di_flags);
377         ip->i_eattr = be64_to_cpu(str->di_eattr);
378         /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
379         gfs2_set_inode_flags(&ip->i_inode);
380         height = be16_to_cpu(str->di_height);
381         if (unlikely(height > GFS2_MAX_META_HEIGHT))
382                 goto corrupt;
383         ip->i_height = (u8)height;
384
385         depth = be16_to_cpu(str->di_depth);
386         if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
387                 goto corrupt;
388         ip->i_depth = (u8)depth;
389         ip->i_entries = be32_to_cpu(str->di_entries);
390
391         if (S_ISREG(ip->i_inode.i_mode))
392                 gfs2_set_aops(&ip->i_inode);
393
394         return 0;
395 corrupt:
396         gfs2_consist_inode(ip);
397         return -EIO;
398 }
399
400 /**
401  * gfs2_inode_refresh - Refresh the incore copy of the dinode
402  * @ip: The GFS2 inode
403  *
404  * Returns: errno
405  */
406
407 int gfs2_inode_refresh(struct gfs2_inode *ip)
408 {
409         struct buffer_head *dibh;
410         int error;
411
412         error = gfs2_meta_inode_buffer(ip, &dibh);
413         if (error)
414                 return error;
415
416         error = gfs2_dinode_in(ip, dibh->b_data);
417         brelse(dibh);
418         clear_bit(GIF_INVALID, &ip->i_flags);
419
420         return error;
421 }
422
423 /**
424  * inode_go_lock - operation done after an inode lock is locked by a process
425  * @gh: the glock holder
427  *
428  * Returns: errno
429  */
430
431 static int inode_go_lock(struct gfs2_holder *gh)
432 {
433         struct gfs2_glock *gl = gh->gh_gl;
434         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
435         struct gfs2_inode *ip = gl->gl_object;
436         int error = 0;
437
438         if (!ip || (gh->gh_flags & GL_SKIP))
439                 return 0;
440
441         if (test_bit(GIF_INVALID, &ip->i_flags)) {
442                 error = gfs2_inode_refresh(ip);
443                 if (error)
444                         return error;
445         }
446
447         if (gh->gh_state != LM_ST_DEFERRED)
448                 inode_dio_wait(&ip->i_inode);
449
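        /*
         * A truncate that was interrupted (GFS2_DIF_TRUNC_IN_PROG) is
         * finished asynchronously: put the inode on sd_trunc_list and wake
         * up quotad, which also services that list.
         */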
450         if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
451             (gl->gl_state == LM_ST_EXCLUSIVE) &&
452             (gh->gh_state == LM_ST_EXCLUSIVE)) {
453                 spin_lock(&sdp->sd_trunc_lock);
454                 if (list_empty(&ip->i_trunc_list))
455                         list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
456                 spin_unlock(&sdp->sd_trunc_lock);
457                 wake_up(&sdp->sd_quota_wait);
458                 return 1;
459         }
460
461         return error;
462 }
463
464 /**
465  * inode_go_dump - print information about an inode
466  * @seq: The iterator
467  * @gl: the glock
468  *
469  */
470
471 static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
472 {
473         const struct gfs2_inode *ip = gl->gl_object;
474         if (ip == NULL)
475                 return;
476         gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
477                   (unsigned long long)ip->i_no_formal_ino,
478                   (unsigned long long)ip->i_no_addr,
479                   IF2DT(ip->i_inode.i_mode), ip->i_flags,
480                   (unsigned int)ip->i_diskflags,
481                   (unsigned long long)i_size_read(&ip->i_inode));
482 }
483
484 /**
485  * freeze_go_sync - promote/demote the freeze glock
486  * @gl: the glock
489  *
490  */
491
492 static void freeze_go_sync(struct gfs2_glock *gl)
493 {
494         int error = 0;
495         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
496
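        /*
         * Syncing the freeze glock while we still hold it SHARED with a
         * live journal presumably means another node wants it exclusively
         * to freeze the cluster: freeze the local superblock, queue the
         * freeze work and flush the log with the FREEZE flag.
         */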
497         if (gl->gl_state == LM_ST_SHARED &&
498             test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
499                 atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
500                 error = freeze_super(sdp->sd_vfs);
501                 if (error) {
502                         printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
503                         gfs2_assert_withdraw(sdp, 0);
504                 }
505                 queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
506                 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
507                                GFS2_LFC_FREEZE_GO_SYNC);
508         }
509 }
510
511 /**
512  * freeze_go_xmote_bh - After promoting/demoting the freeze glock
513  * @gl: the glock
514  * @gh: the glock holder
515  */
516
517 static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
518 {
519         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
520         struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
521         struct gfs2_glock *j_gl = ip->i_gl;
522         struct gfs2_log_header_host head;
523         int error;
524
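        /*
         * With a live journal, invalidate the journal glock's metadata,
         * re-read the log head, check that it carries the UNMOUNT flag
         * (i.e. the log was cleanly flushed), and reinitialise the log
         * sequence number and pointers from it.
         */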
525         if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
526                 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
527
528                 error = gfs2_find_jhead(sdp->sd_jdesc, &head);
529                 if (error)
530                         gfs2_consist(sdp);
531                 if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
532                         gfs2_consist(sdp);
533
534                 /* Initialize the head of the log */
535                 if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
536                         sdp->sd_log_sequence = head.lh_sequence + 1;
537                         gfs2_log_pointers_init(sdp, head.lh_blkno);
538                 }
539         }
540         return 0;
541 }
542
543 /**
544  * freeze_go_demote_ok
545  * @gl: the glock
546  *
547  * Always returns 0
548  */
549
550 static int freeze_go_demote_ok(const struct gfs2_glock *gl)
551 {
552         return 0;
553 }
554
555 /**
556  * iopen_go_callback - schedule the dcache entry for the inode to be deleted
557  * @gl: the glock
558  *
559  * The gl_lockref.lock spinlock is held while calling this
560  */
561 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
562 {
563         struct gfs2_inode *ip = gl->gl_object;
564         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
565
566         if (!remote || sb_rdonly(sdp->sd_vfs))
567                 return;
568
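        /*
         * A remote demote request on an iopen glock typically means the
         * inode was unlinked on another node.  If we still hold the glock
         * SHARED with an inode attached, take an extra glock reference and
         * queue the delete work so the inode can be evicted and its blocks
         * freed.
         */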
569         if (gl->gl_demote_state == LM_ST_UNLOCKED &&
570             gl->gl_state == LM_ST_SHARED && ip) {
571                 gl->gl_lockref.count++;
572                 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
573                         gl->gl_lockref.count--;
574         }
575 }
576
577 const struct gfs2_glock_operations gfs2_meta_glops = {
578         .go_type = LM_TYPE_META,
579 };
580
581 const struct gfs2_glock_operations gfs2_inode_glops = {
582         .go_sync = inode_go_sync,
583         .go_inval = inode_go_inval,
584         .go_demote_ok = inode_go_demote_ok,
585         .go_lock = inode_go_lock,
586         .go_dump = inode_go_dump,
587         .go_type = LM_TYPE_INODE,
588         .go_flags = GLOF_ASPACE | GLOF_LRU,
589 };
590
591 const struct gfs2_glock_operations gfs2_rgrp_glops = {
592         .go_sync = rgrp_go_sync,
593         .go_inval = rgrp_go_inval,
594         .go_lock = gfs2_rgrp_go_lock,
595         .go_unlock = gfs2_rgrp_go_unlock,
596         .go_dump = gfs2_rgrp_dump,
597         .go_type = LM_TYPE_RGRP,
598         .go_flags = GLOF_LVB,
599 };
600
601 const struct gfs2_glock_operations gfs2_freeze_glops = {
602         .go_sync = freeze_go_sync,
603         .go_xmote_bh = freeze_go_xmote_bh,
604         .go_demote_ok = freeze_go_demote_ok,
605         .go_type = LM_TYPE_NONDISK,
606 };
607
608 const struct gfs2_glock_operations gfs2_iopen_glops = {
609         .go_type = LM_TYPE_IOPEN,
610         .go_callback = iopen_go_callback,
611         .go_flags = GLOF_LRU,
612 };
613
614 const struct gfs2_glock_operations gfs2_flock_glops = {
615         .go_type = LM_TYPE_FLOCK,
616         .go_flags = GLOF_LRU,
617 };
618
619 const struct gfs2_glock_operations gfs2_nondisk_glops = {
620         .go_type = LM_TYPE_NONDISK,
621 };
622
623 const struct gfs2_glock_operations gfs2_quota_glops = {
624         .go_type = LM_TYPE_QUOTA,
625         .go_flags = GLOF_LVB | GLOF_LRU,
626 };
627
628 const struct gfs2_glock_operations gfs2_journal_glops = {
629         .go_type = LM_TYPE_JOURNAL,
630 };
631
632 const struct gfs2_glock_operations *gfs2_glops_list[] = {
633         [LM_TYPE_META] = &gfs2_meta_glops,
634         [LM_TYPE_INODE] = &gfs2_inode_glops,
635         [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
636         [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
637         [LM_TYPE_FLOCK] = &gfs2_flock_glops,
638         [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
639         [LM_TYPE_QUOTA] = &gfs2_quota_glops,
640         [LM_TYPE_JOURNAL] = &gfs2_journal_glops,
641 };
642