/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_ROUND_UP(nstruct - first, second);
        }

        return blks;
}
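
/*
 * A worked example of the computation above (a sketch, assuming 4096-byte
 * blocks and 8-byte entries, i.e. ssize == sizeof(u64) as used for revokes):
 *
 *      first  = (4096 - sizeof(struct gfs2_log_descriptor)) / 8;
 *      second = (4096 - sizeof(struct gfs2_meta_header)) / 8;
 *
 * The descriptor block itself holds "first" entries and every continuation
 * block holds "second" entries, so once nstruct exceeds "first" the total
 * is 1 + DIV_ROUND_UP(nstruct - first, second) blocks.
 */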

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function.
 * Drops the reference that the ail list held on bd->bd_bh.
 */
static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
        bd->bd_tr = NULL;
        list_del_init(&bd->bd_ail_st_list);
        list_del_init(&bd->bd_ail_gl_list);
        atomic_dec(&bd->bd_gl->gl_ail_count);
        brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list we are writing back
 * @withdraw: set to true on a fatal buffer I/O error
 *
 * Returns: 1 if writeback was started and the caller should restart the
 * scan, 0 otherwise
 */
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
                               struct writeback_control *wbc,
                               struct gfs2_trans *tr,
                               bool *withdraw)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
        struct gfs2_glock *gl = NULL;
        struct address_space *mapping;
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_tr == tr);

                if (!buffer_busy(bh)) {
                        if (!buffer_uptodate(bh) &&
                            !test_and_set_bit(SDF_AIL1_IO_ERROR,
                                              &sdp->sd_flags)) {
                                gfs2_io_error_bh(sdp, bh);
                                *withdraw = true;
                        }
                        list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
                        continue;
                }

                if (!buffer_dirty(bh))
                        continue;
                if (gl == bd->bd_gl)
                        continue;
                gl = bd->bd_gl;
                list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
                mapping = bh->b_page->mapping;
                if (!mapping)
                        continue;
                spin_unlock(&sdp->sd_ail_lock);
                generic_writepages(mapping, wbc);
                spin_lock(&sdp->sd_ail_lock);
                if (wbc->nr_to_write <= 0)
                        break;
                return 1;
        }

        return 0;
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
        struct list_head *head = &sdp->sd_ail1_list;
        struct gfs2_trans *tr;
        struct blk_plug plug;
        bool withdraw = false;

        trace_gfs2_ail_flush(sdp, wbc, 1);
        blk_start_plug(&plug);
        spin_lock(&sdp->sd_ail_lock);
restart:
        list_for_each_entry_reverse(tr, head, tr_list) {
                if (wbc->nr_to_write <= 0)
                        break;
                if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
                        goto restart;
        }
        spin_unlock(&sdp->sd_ail_lock);
        blk_finish_plug(&plug);
        if (withdraw)
                gfs2_lm_withdraw(sdp, NULL);
        trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction to check
 * @withdraw: set to true on a fatal buffer I/O error
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
                                bool *withdraw)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;
                gfs2_assert(sdp, bd->bd_tr == tr);
                if (buffer_busy(bh))
                        continue;
                if (!buffer_uptodate(bh) &&
                    !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
                        gfs2_io_error_bh(sdp, bh);
                        *withdraw = true;
                }
                list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
        }
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 *
 * Returns: non-zero if the ail1 list is now empty
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr, *s;
        int oldest_tr = 1;
        int ret;
        bool withdraw = false;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
                gfs2_ail1_empty_one(sdp, tr, &withdraw);
                if (list_empty(&tr->tr_ail1_list) && oldest_tr)
                        list_move(&tr->tr_list, &sdp->sd_ail2_list);
                else
                        oldest_tr = 0;
        }
        ret = list_empty(&sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        if (withdraw)
                gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");

        return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
                        bh = bd->bd_bh;
                        if (!buffer_locked(bh))
                                continue;
                        get_bh(bh);
                        spin_unlock(&sdp->sd_ail_lock);
                        wait_on_buffer(bh);
                        brelse(bh);
                        return;
                }
        }
        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail2_empty_one - Remove all buffers on a transaction's ail2 list
 * @sdp: the filesystem
 * @tr: the AIL entry
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &tr->tr_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_tr == tr);
                gfs2_remove_from_ail(bd);
        }
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_trans *tr, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        spin_lock(&sdp->sd_ail_lock);

        list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
                a = (old_tail <= tr->tr_first);
                b = (tr->tr_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, tr);
                list_del(&tr->tr_list);
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
                kfree(tr);
        }

        spin_unlock(&sdp->sd_ail_lock);
}
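
/*
 * An example of the wrap test in ail2_empty() (a sketch with made-up
 * numbers): in a 1000-block journal with old_tail == 900 and
 * new_tail == 100, the tail has wrapped, so a transaction is removed
 * when tr_first >= 900 (a) OR tr_first < 100 (b).  Without a wrap,
 * both conditions must hold, i.e. tr_first lies in [old_tail, new_tail).
 */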

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
        atomic_add(blks, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, blks);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                                  sdp->sd_jdesc->jd_blocks);
        up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * because a small number of header blocks is associated with each log
 * flush. The exact number can't be known until flush time, so we ensure
 * that we always have just enough free blocks to avoid running out during
 * a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        int ret = 0;
        unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
        unsigned wanted = blks + reserved_blks;
        DEFINE_WAIT(wait);
        int did_wait = 0;
        unsigned int free_blocks;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;
        atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
        free_blocks = atomic_read(&sdp->sd_log_blks_free);
        if (unlikely(free_blocks <= wanted)) {
                do {
                        prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        wake_up(&sdp->sd_logd_waitq);
                        did_wait = 1;
                        if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
                                io_schedule();
                        free_blocks = atomic_read(&sdp->sd_log_blks_free);
                } while (free_blocks <= wanted);
                finish_wait(&sdp->sd_log_waitq, &wait);
        }
        atomic_inc(&sdp->sd_reserving_log);
        if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
                                free_blocks - blks) != free_blocks) {
                if (atomic_dec_and_test(&sdp->sd_reserving_log))
                        wake_up(&sdp->sd_reserving_log_wait);
                goto retry;
        }
        atomic_sub(blks, &sdp->sd_log_blks_needed);
        trace_gfs2_log_blocks(sdp, -blks);

        /*
         * If we waited, then so might others, wake them up _after_ we get
         * our share of the log.
         */
        if (unlikely(did_wait))
                wake_up(&sdp->sd_log_waitq);

        down_read(&sdp->sd_log_flush_lock);
        if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
                gfs2_log_release(sdp, blks);
                ret = -EROFS;
        }
        if (atomic_dec_and_test(&sdp->sd_reserving_log))
                wake_up(&sdp->sd_reserving_log_wait);
        return ret;
}
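
/*
 * A minimal usage sketch (hypothetical caller; in this kernel the
 * reservation for a transaction is normally made via gfs2_trans_begin()):
 *
 *      if (gfs2_log_reserve(sdp, blks) == 0) {
 *              ... journaled work ...
 *              gfs2_log_release(sdp, blks);
 *      }
 *
 * gfs2_log_reserve() returns with sd_log_flush_lock held for read;
 * gfs2_log_release() gives the blocks back and drops that lock.
 */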

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */
static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}
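
/*
 * For example (a sketch with made-up numbers): in a 1024-block journal,
 * log_distance(sdp, 10, 1000) yields 10 - 1000 = -990, which wraps to
 * -990 + 1024 = 34 blocks of forward distance in the journal.
 */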

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, log_refund() and gfs2_log_flush()
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
        unsigned int reserved = 0;
        unsigned int mbuf;
        unsigned int dbuf;
        struct gfs2_trans *tr = sdp->sd_log_tr;

        if (tr) {
                mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
                dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
                reserved = mbuf + dbuf;
                /* Account for header blocks */
                reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
                reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
        }

        if (sdp->sd_log_commited_revoke > 0)
                reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                          sizeof(u64));
        /* One for the overall header */
        if (reserved)
                reserved++;
        return reserved;
}
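
/*
 * A worked example for calc_reserved() (a sketch with made-up numbers):
 * a transaction on a 4K-block filesystem with 10 net new metadata
 * buffers, 2 net new journaled data buffers and no revokes reserves
 * 10 + 2 buffer blocks, one descriptor header per buffer type (neither
 * count exceeds its per-block limit), and one overall log header:
 * 15 blocks in total.
 */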

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        unsigned int tail;

        spin_lock(&sdp->sd_ail_lock);

        if (list_empty(&sdp->sd_ail1_list)) {
                tail = sdp->sd_log_head;
        } else {
                tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
                                tr_list);
                tail = tr->tr_first;
        }

        spin_unlock(&sdp->sd_ail_lock);

        return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        atomic_add(dist, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, dist);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_tail = new_tail;
}

static void log_flush_wait(struct gfs2_sbd *sdp)
{
        DEFINE_WAIT(wait);

        if (atomic_read(&sdp->sd_log_in_flight)) {
                do {
                        prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&sdp->sd_log_in_flight))
                                io_schedule();
                } while (atomic_read(&sdp->sd_log_in_flight));
                finish_wait(&sdp->sd_log_flush_wait, &wait);
        }
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_inode *ipa, *ipb;

        ipa = list_entry(a, struct gfs2_inode, i_ordered);
        ipb = list_entry(b, struct gfs2_inode, i_ordered);

        if (ipa->i_no_addr < ipb->i_no_addr)
                return -1;
        if (ipa->i_no_addr > ipb->i_no_addr)
                return 1;
        return 0;
}
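
/*
 * Sorting the ordered-write list by i_no_addr (as gfs2_ordered_write()
 * does below, via list_sort()) means writeback is issued in on-disk
 * inode-address order, which tends to keep the resulting I/O roughly
 * sequential.
 */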

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;
        LIST_HEAD(written);

        spin_lock(&sdp->sd_ordered_lock);
        list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                if (ip->i_inode.i_mapping->nrpages == 0) {
                        test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
                        list_del(&ip->i_ordered);
                        continue;
                }
                list_move(&ip->i_ordered, &written);
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawrite(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        list_splice(&written, &sdp->sd_log_le_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        spin_lock(&sdp->sd_ordered_lock);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                list_del(&ip->i_ordered);
                WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawait(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        spin_lock(&sdp->sd_ordered_lock);
        if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
                list_del(&ip->i_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct buffer_head *bh = bd->bd_bh;
        struct gfs2_glock *gl = bd->bd_gl;

        bh->b_private = NULL;
        bd->bd_blkno = bh->b_blocknr;
        gfs2_remove_from_ail(bd); /* drops ref on bh */
        bd->bd_bh = NULL;
        bd->bd_ops = &gfs2_revoke_lops;
        sdp->sd_log_num_revoke++;
        if (atomic_inc_return(&gl->gl_revokes) == 1)
                gfs2_glock_hold(gl);
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}

void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
        if (atomic_dec_return(&gl->gl_revokes) == 0) {
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                gfs2_glock_queue_put(gl);
        }
}

void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd, *tmp;
        int have_revokes = 0;
        int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

        gfs2_ail1_empty(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (list_empty(&bd->bd_list)) {
                                have_revokes = 1;
                                goto done;
                        }
                }
        }
done:
        spin_unlock(&sdp->sd_ail_lock);
        if (have_revokes == 0)
                return;
        while (sdp->sd_log_num_revoke > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
        max_revokes -= sdp->sd_log_num_revoke;
        if (!sdp->sd_log_num_revoke) {
                atomic_dec(&sdp->sd_log_blks_free);
                /* If no blocks have been reserved, we need to also
                 * reserve a block for the header */
                if (!sdp->sd_log_blks_reserved)
                        atomic_dec(&sdp->sd_log_blks_free);
        }
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (max_revokes == 0)
                                goto out_of_blocks;
                        if (!list_empty(&bd->bd_list))
                                continue;
                        gfs2_add_revoke(sdp, bd);
                        max_revokes--;
                }
        }
out_of_blocks:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (!sdp->sd_log_num_revoke) {
                atomic_inc(&sdp->sd_log_blks_free);
                if (!sdp->sd_log_blks_reserved)
                        atomic_inc(&sdp->sd_log_blks_free);
        }
}

/**
 * gfs2_write_log_header - Write a journal log header buffer at sd_log_flush_head
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
                           u64 seq, u32 tail, u32 flags, int op_flags)
{
        struct gfs2_log_header *lh;
        u32 hash, crc;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct timespec64 tv;
        struct super_block *sb = sdp->sd_vfs;
        u64 addr;

        lh = page_address(page);
        clear_page(lh);

        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
        lh->lh_header.__pad0 = cpu_to_be64(0);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
        lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        lh->lh_sequence = cpu_to_be64(seq);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = ~crc32(~0, lh, LH_V1_SIZE);
        lh->lh_hash = cpu_to_be32(hash);

        ktime_get_coarse_real_ts64(&tv);
        lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
        lh->lh_sec = cpu_to_be64(tv.tv_sec);
        addr = gfs2_log_bmap(sdp);
        lh->lh_addr = cpu_to_be64(addr);
        lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

        /* We may only write local statfs, quota, etc., when writing to our
           own journal. The values are left 0 when recovering a journal
           different from our own. */
        if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
                lh->lh_statfs_addr =
                        cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
                lh->lh_quota_addr =
                        cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

                spin_lock(&sdp->sd_statfs_spin);
                lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
                lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
                lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
                spin_unlock(&sdp->sd_statfs_spin);
        }

        BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

        crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
                     sb->s_blocksize - LH_V1_SIZE - 4);
        lh->lh_crc = cpu_to_be32(crc);

        gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
        gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
        log_flush_wait(sdp);
}

/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
        unsigned int tail;
        int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

        gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
        tail = current_tail(sdp);

        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
                gfs2_ordered_wait(sdp);
                log_flush_wait(sdp);
                op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
        }
        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
                              flags, op_flags);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail);
}
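
/*
 * A note on the flags chosen above: with barriers enabled, the header is
 * written with REQ_PREFLUSH | REQ_FUA, so the device flushes previously
 * submitted log blocks to stable storage before the header and makes the
 * header itself durable.  With SDF_NOBARRIERS set we cannot rely on that
 * ordering, so ordered data and in-flight log I/O are waited on explicitly
 * before the header goes out as a plain synchronous write.
 */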

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 *
 */
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
        struct gfs2_trans *tr;
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

        down_write(&sdp->sd_log_flush_lock);

        /* Log might have been flushed while we waited for the flush lock */
        if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
                up_write(&sdp->sd_log_flush_lock);
                return;
        }
        trace_gfs2_log_flush(sdp, 1, flags);

        if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
                clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        tr = sdp->sd_log_tr;
        if (tr) {
                sdp->sd_log_tr = NULL;
                tr->tr_first = sdp->sd_log_flush_head;
                if (unlikely(state == SFS_FROZEN))
                        gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
        }

        if (unlikely(state == SFS_FROZEN))
                gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        gfs2_ordered_write(sdp);
        lops_before_commit(sdp, tr);
        gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);

        if (sdp->sd_log_head != sdp->sd_log_flush_head) {
                log_flush_wait(sdp);
                log_write_header(sdp, flags);
        } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
                atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                trace_gfs2_log_blocks(sdp, -1);
                log_write_header(sdp, flags);
        }
        lops_after_commit(sdp, tr);

        gfs2_log_lock(sdp);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_revoke = 0;

        spin_lock(&sdp->sd_ail_lock);
        if (tr && !list_empty(&tr->tr_ail1_list)) {
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
                tr = NULL;
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
                if (!sdp->sd_log_idle) {
                        for (;;) {
                                gfs2_ail1_start(sdp);
                                gfs2_ail1_wait(sdp);
                                if (gfs2_ail1_empty(sdp))
                                        break;
                        }
                        atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                        trace_gfs2_log_blocks(sdp, -1);
                        log_write_header(sdp, flags);
                        sdp->sd_log_head = sdp->sd_log_flush_head;
                }
                if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
                             GFS2_LOG_HEAD_FLUSH_FREEZE))
                        gfs2_log_shutdown(sdp);
                if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
                        atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }

        trace_gfs2_log_flush(sdp, 0, flags);
        up_write(&sdp->sd_log_flush_lock);

        kfree(tr);
}

/**
 * gfs2_merge_trans - Merge a new transaction into the cached transaction
 * @sdp: the filesystem whose cached transaction (sd_log_tr) is expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
        struct gfs2_trans *old = sdp->sd_log_tr;

        WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

        old->tr_num_buf_new     += new->tr_num_buf_new;
        old->tr_num_databuf_new += new->tr_num_databuf_new;
        old->tr_num_buf_rm      += new->tr_num_buf_rm;
        old->tr_num_databuf_rm  += new->tr_num_databuf_rm;
        old->tr_num_revoke      += new->tr_num_revoke;
        old->tr_num_revoke_rm   += new->tr_num_revoke_rm;

        list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
        list_splice_tail_init(&new->tr_buf, &old->tr_buf);

        spin_lock(&sdp->sd_ail_lock);
        list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
        list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
        spin_unlock(&sdp->sd_ail_lock);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int unused;
        unsigned int maxres;

        gfs2_log_lock(sdp);

        if (sdp->sd_log_tr) {
                gfs2_merge_trans(sdp, tr);
        } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
                gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
                sdp->sd_log_tr = tr;
                set_bit(TR_ATTACHED, &tr->tr_flags);
        }

        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        reserved = calc_reserved(sdp);
        maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
        gfs2_assert_withdraw(sdp, maxres >= reserved);
        unused = maxres - reserved;
        atomic_add(unused, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, unused);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);
        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);

        if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
            ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
            atomic_read(&sdp->sd_log_thresh2)))
                wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
        return (atomic_read(&sdp->sd_log_pinned) +
                atomic_read(&sdp->sd_log_blks_needed) >=
                atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
        unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

        if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
                return 1;

        return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
                atomic_read(&sdp->sd_log_thresh2);
}
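
/*
 * Example of the two thresholds above (a sketch; per the comment at
 * gfs2_log_commit(), thresh1 is about 1/3 and thresh2 about 2/3 of the
 * journal): in a 32768-block journal, gfs2_logd forces a journal flush
 * once pinned + needed blocks reach roughly 10922, and an AIL flush once
 * used + needed blocks reach roughly 21845.
 */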

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t = 1;
        DEFINE_WAIT(wait);
        bool did_flush;

        while (!kthread_should_stop()) {

                /* Check for errors writing to the journal */
                if (sdp->sd_log_error) {
                        gfs2_lm_withdraw(sdp,
                                         "GFS2: fsid=%s: error %d: "
                                         "withdrawing the file system to "
                                         "prevent further damage.\n",
                                         sdp->sd_fsname, sdp->sd_log_error);
                }

                did_flush = false;
                if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                                       GFS2_LFC_LOGD_JFLUSH_REQD);
                        did_flush = true;
                }

                if (gfs2_ail_flush_reqd(sdp)) {
                        gfs2_ail1_start(sdp);
                        gfs2_ail1_wait(sdp);
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                                       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
                        did_flush = true;
                }

                if (!gfs2_ail_flush_reqd(sdp) || did_flush)
                        wake_up(&sdp->sd_log_waitq);

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

                try_to_freeze();

                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
                                        TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
                                t = schedule_timeout(t);
                } while (t && !gfs2_ail_flush_reqd(sdp) &&
                        !gfs2_jrnl_flush_reqd(sdp) &&
                        !kthread_should_stop());
                finish_wait(&sdp->sd_logd_waitq, &wait);
        }

        return 0;
}