 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/random.h>
#include "trace_gfs2.h"
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)
#if BITS_PER_LONG == 32
#define LBITMASK (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
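 *
 * As a worked example (an illustration added here, assuming the usual
 * GFS2_BLKST_* values 0-3): the byte 0xE4 = 0b11100100 encodes four
 * blocks, with block 0 in the two least significant bits: block 0 = 00
 * (free), block 1 = 01 (used), block 2 = 10 (unlinked) and
 * block 3 = 11 (dinode).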
static const char valid_change[16] = {
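	/* This table is indexed as valid_change[new_state * 4 + cur_state]
	 * (see gfs2_setbit() below); a non-zero entry marks a permitted
	 * transition between two-bit block states. */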
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap);
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
	unsigned char *byte1, *byte2, *end, cur_state;
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	unsigned int buflen = bi->bi_len;
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
	byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = bi->bi_bh->b_data + bi->bi_offset + buflen;
	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
			rbm->offset, cur_state, new_state);
		pr_warn("rgrp=0x%llx bi_start=0x%x\n",
			(unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
		pr_warn("bi_offset=0x%x bi_len=0x%x\n",
			bi->bi_offset, bi->bi_len);
		gfs2_consist_rgrpd(rbm->rgd);
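	/* XOR with (cur_state ^ new_state) flips exactly the bits that
	 * differ, so the update below rewrites only this 2-bit field and
	 * leaves the other three block states in the same byte untouched. */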
	*byte1 ^= (cur_state ^ new_state) << bit;
	if (do_clone && bi->bi_clone) {
		byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 * Returns: The two bit block state of the requested bit
static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
	return (*byte >> bit) & GFS2_BIT_MASK;
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for; this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0, 2, 4, etc.) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
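 *
 * A worked example (added illustration, not part of the original
 * comment): to find GFS2_BLKST_USED (01) pairs in the byte 0xE4
 * (pairs 00, 01, 10, 11 from the low end), xor with the search
 * pattern 0xAA gives 0x4E = 0b01001110; 0x4E & (0x4E >> 1) =
 * 0b00000110; masking with 0x55 leaves 0b00000100, i.e. only even
 * bit 2 is set, identifying pair 1 as the match.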
static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	tmp = le64_to_cpu(*ptr) ^ search[state];
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
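 *
 * For example (an added illustration): a reservation with rs_free = 10
 * starting at block 100 covers blocks 100-109, so rs_cmp(110, 5, rs)
 * returns 1, rs_cmp(95, 5, rs) returns -1, and rs_cmp(95, 6, rs)
 * returns 0 because blocks 95-100 touch the start of the reservation.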
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
	if (blk >= startblk + rs->rs_free)
	if (blk + len - 1 < startblk)
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 * The scope of @goal and the returned block number is limited to this
 * bitmap buffer, not the entire rgrp or filesystem. @buf will be offset
 * from the actual beginning of a bitmap block buffer, skipping any
 * header structures, but headers are always a multiple of 64 bits long
 * so that the buffer is always aligned to a 64 bit boundary.
 * The size of the buffer is in bytes, but it is assumed that it is
 * always OK to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 * Return: the block number (bitmap buffer scope) that was found
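 *
 * As an added worked example of the index arithmetic below: for
 * goal = 70, ptr starts goal >> 5 = 2 u64 words into @buf (each u64
 * holds 32 bit-pairs), and spoint = (goal << 1) & 63 = 12, the bit
 * offset of block 70 within that word (70 - 64 = 6 pairs of 2 bits).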
static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 mask = 0x5555555555555555ULL;
	/* Mask off bits we don't care about at the start of the search */
	tmp = gfs2_bit_search(ptr, mask, state);
	while(tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 * Returns: 0 on success, or an error code
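 *
 * Note (added explanation): past the first bitmap block, the offset is
 * biased by (sizeof(struct gfs2_rgrp) - sizeof(struct gfs2_meta_header))
 * * GFS2_NBBY, so every bitmap block can be treated as if it carried the
 * larger rgrp header; a single divide by sd_blocks_per_bitmap then
 * yields the bitmap index, and the remainder is the offset within it.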
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
	u64 rblock = block - rbm->rgd->rd_data0;
	if (WARN_ON_ONCE(rblock > UINT_MAX))
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first bitmap block */
	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
 * gfs2_rbm_incr - increment an rbm structure
 * @rbm: The rbm with rgd already set correctly
 * This function takes an existing rbm structure and increments it to the next
 * viable block offset.
 * Returns: If incrementing the offset would cause the rbm to go past the
 *          end of the rgrp, true is returned, otherwise false.
static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 * Returns: true if a non-free block is encountered
static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
	for (n = 0; n < n_unaligned; n++) {
		res = gfs2_testbit(rbm);
		if (res != GFS2_BLKST_FREE)
		if (gfs2_rbm_incr(rbm))
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 * Returns: Number of free blocks in the extent
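 *
 * Added note: a byte that is entirely zero holds GFS2_NBBY (4) free
 * blocks, which is why the leading rbm.offset & 3 blocks and the
 * trailing len & 3 blocks are handled bit-by-bit while the aligned
 * middle is scanned a byte at a time with memchr_inv().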
static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u8 *ptr, *start, *end;
	struct gfs2_bitmap *bi;
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
	n_unaligned = len & 3;
	/* Start is now byte aligned */
		start = bi->bi_bh->b_data;
			start = bi->bi_clone;
		end = start + bi->bi_bh->b_size;
		start += bi->bi_offset;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
		n_unaligned = len & 3;
	/* Deal with any bits left over at the end */
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 * Returns: The number of bits
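 *
 * Added note: each byte is checked against all four pair positions at
 * once; for instance, counting GFS2_BLKST_FREE (00) in the byte 0xE4
 * matches only the low pair ((0xE4 & 0x03) == 0), giving a count of 1.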
static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
		if (((*byte) & 0x0C) == state1)
		if (((*byte) & 0x30) == state2)
		if (((*byte) & 0xC0) == state3)
 * gfs2_rgrp_verify - Verify that a resource group is consistent
void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	memset(count, 0, 4 * sizeof(u32));
	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_free);
	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 * Returns: The resource group, or NULL if not found
struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;
	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		if (blk < cur->rd_addr)
		else if (blk >= cur->rd_data0 + cur->rd_data)
			spin_unlock(&sdp->sd_rindex_spin);
			if (blk < cur->rd_addr)
			if (blk >= cur->rd_data0 + cur->rd_data)
	spin_unlock(&sdp->sd_rindex_spin);
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 * Returns: The first rgrp in the filesystem
struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 * Returns: The next rgrp
struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;
	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
		n = rb_first(&sdp->sd_rindex_tree);
	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
void check_and_update_goal(struct gfs2_inode *ip)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
		ip->i_goal = ip->i_no_addr;
void gfs2_free_clones(struct gfs2_rgrpd *rgd)
	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
 * plus a quota allocations data structure, if necessary
 * @ip: the inode for this reservation
int gfs2_rsqa_alloc(struct gfs2_inode *ip)
	return gfs2_qa_alloc(ip);
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
	gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
static void __rs_deltree(struct gfs2_blkreserv *rs)
	struct gfs2_rgrpd *rgd;
	if (!gfs2_rs_active(rs))
	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);
		u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
		struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
		struct gfs2_bitmap *start, *last;
		/* return reserved blocks to the rgrp */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		/* The rgrp extent failure point is likely not to increase;
		   it will only do so if the freed blocks are somehow
		   contiguous with a span of free blocks that follows. Still,
		   it will force the number to be recalculated later. */
		rgd->rd_extfail_pt += rs->rs_free;
		if (gfs2_rbm_from_block(&last_rbm, last_block))
		start = rbm_bi(&rs->rs_rbm);
		last = rbm_bi(&last_rbm);
			clear_bit(GBF_FULL, &start->bi_flags);
		while (start++ != last);
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
	struct gfs2_rgrpd *rgd;
	rgd = rs->rs_rbm.rgd;
		spin_lock(&rgd->rd_rsspin);
		spin_unlock(&rgd->rd_rsspin);
 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
 * @ip: The inode for this reservation
 * @wcount: The inode's write count, or NULL
void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
	down_write(&ip->i_rw_mutex);
	if ((wcount == NULL) || (atomic_read(wcount) <= 1))
		gfs2_rs_deltree(&ip->i_res);
	up_write(&ip->i_rw_mutex);
	gfs2_qa_delete(ip, wcount);
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
static void return_all_reservations(struct gfs2_rgrpd *rgd)
	struct gfs2_blkreserv *rs;
	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
	spin_unlock(&rgd->rd_rsspin);
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;
	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		rb_erase(n, &sdp->sd_rindex_tree);
			glock_clear_object(gl, rgd);
			gfs2_rgrp_brelse(rgd);
		gfs2_free_clones(rgd);
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
	pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	pr_info("ri_length = %u\n", rgd->rd_length);
	pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	pr_info("ri_data = %u\n", rgd->rd_data);
	pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 * Calculates bitmap descriptors, one for each block that contains bitmap data
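 *
 * Added note: the loop below distinguishes four layouts: an rgrp whose
 * bitmap fits entirely in the header block, the header block of a
 * multi-block bitmap, a trailing block, and the fully-used blocks in
 * between. Header blocks carry a struct gfs2_rgrp; the others only a
 * struct gfs2_meta_header.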
static int compute_bitstructs(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	bytes_left = rgd->rd_bitbytes;
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		/* small rgrp; bitmap stored completely in header block */
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_blocks = bytes * GFS2_NBBY;
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_blocks = bytes * GFS2_NBBY;
		} else if (x + 1 == length) {
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_blocks = bytes * GFS2_NBBY;
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_blocks = bytes * GFS2_NBBY;
		gfs2_consist_rgrpd(rgd);
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);
		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
static int rgd_insert(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
	/* Figure out where to put new node */
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 * Returns: 0 on success, > 0 on EOF, error code otherwise
static int read_rindex_entry(struct gfs2_inode *ip)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	struct gfs2_rgrpd *rgd;
	if (pos >= i_size_read(&ip->i_inode))
	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));
	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;
	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);
	error = compute_bitstructs(rgd);
	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
		glock_set_object(rgd->rd_gl, rgd);
		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
						    rgd->rd_length) * bsize) - 1;
	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
 * @sdp: the GFS2 superblock
 * The purpose of this function is to select a subset of the resource groups
 * and mark them as PREFERRED. We do it in such a way that each node prefers
 * to use a unique set of rgrps to minimize glock contention.
static void set_rgrp_preferences(struct gfs2_sbd *sdp)
	struct gfs2_rgrpd *rgd, *first;
	/* Skip an initial number of rgrps, based on this node's journal ID.
	   That should start each node out on its own set. */
	rgd = gfs2_rgrpd_get_first(sdp);
	for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
		rgd = gfs2_rgrpd_get_next(rgd);
		rgd->rd_flags |= GFS2_RDF_PREFERRED;
		for (i = 0; i < sdp->sd_journals; i++) {
			rgd = gfs2_rgrpd_get_next(rgd);
			if (!rgd || rgd == first)
	} while (rgd && rgd != first);
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 * Returns: 0 on successful update, error code otherwise
static int gfs2_ri_update(struct gfs2_inode *ip)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
		error = read_rindex_entry(ip);
	} while (error == 0);
	if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
		fs_err(sdp, "no resource groups found in the file system.\n");
	set_rgrp_preferences(sdp);
	sdp->sd_rindex_uptodate = 1;
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 * Returns: 0 on success, error code otherwise
int gfs2_rindex_update(struct gfs2_sbd *sdp)
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int unlock_required = 0;
	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			unlock_required = 1;
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
	const struct gfs2_rgrp *str = buf;
	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
	struct gfs2_rgrp *str = buf;
	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
	const struct gfs2_rgrp *str = buf;
	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
	rgl->rl_unlinked = cpu_to_be32(unlinked);
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;
	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	if (rgd->rd_bits[0].bi_bh != NULL)
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
						      GFS2_METATYPE_RG)) {
	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
		/* max out the rgrp allocation failure point */
		rgd->rd_extfail_pt = rgd->rd_free;
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)) {
			gfs2_consist_rgrpd(rgd);
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
		bi = rgd->rd_bits + x;
		gfs2_assert_warn(sdp, !bi->bi_clone);
static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);
	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
	return gfs2_rgrp_bh_get(rgd);
 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: The resource group
void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
	int x, length = rgd->rd_length;
	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
 * @gh: The glock holder for the resource group
void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
			       test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
	if (rgd && demote_requested)
		gfs2_rgrp_brelse(rgd);
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
	struct super_block *sb = sdp->sd_vfs;
	sector_t nr_blks = 0;
	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
			diff = ~(*clone | (*clone >> 1));
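		/* Added note: after this, each even bit of diff marks a
		 * bit-pair that is free in the on-disk state (orig) but was
		 * still busy in the clone, i.e. a block freed since the clone
		 * was taken; with no bh, diff simply marks every currently
		 * free pair. */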
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
					goto start_new_extent;
				if ((start + nr_blks) != blk) {
					if (nr_blks >= minlen) {
						rv = sb_issue_discard(sb,
	if (nr_blks >= minlen) {
		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
		*ptrimmed = trimmed;
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
	sdp->sd_args.ar_discard = 0;
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 * Returns: 0 on success, otherwise error code
int gfs2_fitrim(struct file *filp, void __user *argp)
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	u64 start, end, minlen;
	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
	if (!capable(CAP_SYS_ADMIN))
	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
	if (!blk_queue_discard(q))
	if (copy_from_user(&r, argp, sizeof(r)))
	ret = gfs2_rindex_update(sdp);
	start = r.start >> bs_shift;
	end = start + (r.len >> bs_shift);
	minlen = max_t(u64, r.minlen,
		       q->limits.discard_granularity) >> bs_shift;
	if (end <= start || minlen > sdp->sd_max_rg_data)
	rgd = gfs2_blk2rgrpd(sdp, start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
	if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
	    && (start > rgd_end->rd_data0 + rgd_end->rd_data))
		return -EINVAL; /* start is beyond the end of the fs */
		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp,
						rgd->rd_data0, NULL, bi, minlen,
					gfs2_glock_dq_uninit(&gh);
			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_meta(rgd->rd_gl, bh);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
				gfs2_trans_end(sdp);
		gfs2_glock_dq_uninit(&gh);
		rgd = gfs2_rgrpd_get_next(rgd);
	r.len = trimmed << bs_shift;
	if (copy_to_user(argp, &r, sizeof(r)))
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
static void rs_insert(struct gfs2_inode *ip)
	struct rb_node **newn, *parent = NULL;
	struct gfs2_blkreserv *rs = &ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
	BUG_ON(gfs2_rs_active(rs));
	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(fsblock, rs->rs_free, cur);
			newn = &((*newn)->rb_right);
			newn = &((*newn)->rb_left);
			spin_unlock(&rgd->rd_rsspin);
	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @ap: the allocation parameters
static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   const struct gfs2_alloc_parms *ap)
	struct gfs2_rbm rbm = { .rgd = rgd, };
	struct gfs2_blkreserv *rs = &ip->i_res;
	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
	struct inode *inode = &ip->i_inode;
	if (S_ISDIR(inode->i_mode))
		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = rgd->rd_last_alloc + rgd->rd_data0;
	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
		rs->rs_free = extlen;
		rs->rs_inum = ip->i_no_addr;
		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
			rgd->rd_last_alloc = 0;
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      const struct gfs2_inode *ip)
	struct gfs2_blkreserv *rs;
	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
	spin_unlock(&rgd->rd_rsspin);
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 * @maxext: A pointer to the maximum extent structure
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     struct gfs2_extent *maxext)
	u64 block = gfs2_rbm_to_block(rbm);
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
		extlen = gfs2_free_extlen(rbm, minext);
		if (extlen <= maxext->len)
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block) {
		if (!minext || extlen >= minext)
		if (extlen > maxext->len) {
			maxext->len = extlen;
		nblock = block + extlen;
	ret = gfs2_rbm_from_block(rbm, nblock);
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: Pointer to the requested extent length (NULL for a single block)
 *          This is updated to be the actual reservation size.
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
 *   has come up short on a free block search.
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap)
	struct buffer_head *bh;
	int first_bii = rbm->bii;
	u32 first_offset = rbm->offset;
	int iters = rbm->rgd->rd_length;
	struct gfs2_bitmap *bi;
	struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	if (rbm->offset != 0)
		if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
		    test_bit(GBF_FULL, &bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
		buffer = bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
			buffer = bi->bi_clone + bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
		rbm->offset = offset;
		initial_bii = rbm->bii;
		ret = gfs2_reservation_check_and_update(rbm, ip,
							minext ? *minext : 0,
			n += (rbm->bii - initial_bii);
		if (ret == -E2BIG) {
			n += (rbm->bii - initial_bii);
			goto res_covered_end_of_rgrp;
bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &bi->bi_flags);
next_bitmap:	/* Find next bitmap in the rgrp */
		if (rbm->bii == rbm->rgd->rd_length)
res_covered_end_of_rgrp:
		if ((rbm->bii == 0) && nowrap)
	if (minext == NULL || state != GFS2_BLKST_FREE)
	/* If the extent was too small, and it's smaller than the smallest
	   to have failed before, remember for future reference that it's
	   useless to search this rgrp again for this amount or more. */
	if ((first_offset == 0) && (first_bii == 0) &&
	    (*minext < rbm->rgd->rd_extfail_pt))
		rbm->rgd->rd_extfail_pt = *minext;
	/* If the maximum extent we found is big enough to fulfill the
	   minimum requirements, use it anyway. */
		*minext = maxext.len;
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 * Returns: nothing; the function is void. Any unlinked inodes found are
 * queued to the delete workqueue rather than being returned.
static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
		if (WARN_ON_ONCE(error))
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
		*last_unlinked = block;
		error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
	rgd->rd_flags &= ~GFS2_RDF_CHECK;
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not end up introducing fragmentation by changing resource
 * groups when not actually required.
 * The calculation is fairly simple: we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 * Returns: A boolean verdict on the congestion status
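 *
 * In short (an added summary of the code below): with l_srttb the local
 * mean, a_srttb the per-cpu average and var the summed variances, the
 * rgrp is reported congested when l_srttb > a_srttb and
 * (a_srttb - l_srttb)^2 > var.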
static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
	const struct gfs2_glock *gl = rgd->rd_gl;
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_lkstats *st;
	u64 r_dcount, l_dcount;
	u64 l_srttb, a_srttb = 0;
	int cpu, nonzero = 0;
	for_each_present_cpu(cpu) {
		st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
		if (st->stats[GFS2_LKS_SRTTB]) {
			a_srttb += st->stats[GFS2_LKS_SRTTB];
	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
		do_div(a_srttb, nonzero);
	r_dcount = st->stats[GFS2_LKS_DCOUNT];
	var = st->stats[GFS2_LKS_SRTTVARB] +
	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
	if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
	srttb_diff = a_srttb - l_srttb;
	sqr_diff = srttb_diff * srttb_diff;
	if (l_dcount < 8 || r_dcount < 8)
	return ((srttb_diff < 0) && (sqr_diff > var));
 * gfs2_rgrp_used_recently
 * @rs: The block reservation with the rgrp to test
 * @msecs: The time limit in milliseconds
 * Returns: True if the rgrp glock has been used within the time limit
static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
			    rs->rs_rbm.rgd->rd_gl->gl_dstamp));
	return tdiff > (msecs * 1000 * 1000);
static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	get_random_bytes(&skip, sizeof(skip));
	return skip % sdp->sd_rgrps;
static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
	struct gfs2_rgrpd *rgd = *pos;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	rgd = gfs2_rgrpd_get_next(rgd);
		rgd = gfs2_rgrpd_get_first(sdp);
	if (rgd != begin) /* If we didn't wrap */
 * fast_to_acquire - determine if a resource group will be fast to acquire
 * If this is one of our preferred rgrps, it should be quicker to acquire,
 * because we tried to set ourselves up as dlm lock master.
static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
	struct gfs2_glock *gl = rgd->rd_gl;
	if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
	    !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
	if (rgd->rd_flags & GFS2_RDF_PREFERRED)
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @ap: the allocation parameters
 * We try our best to find an rgrp that has at least ap->target blocks
 * available. After a couple of passes (loops == 2), the prospects of finding
 * such an rgrp diminish. At this stage, we return the first rgrp that has
 * at least ap->min_target blocks available. Either way, we set ap->allowed to
 * the number of blocks available in the chosen rgrp.
 * Returns: 0 on success,
 *          -ENOSPC if a suitable rgrp can't be found
int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = &ip->i_res;
	int error = 0, rg_locked, flags = 0;
	u64 last_unlinked = NO_BLOCK;
	if (sdp->sd_args.ar_rgrplvb)
	if (gfs2_assert_warn(sdp, ap->target))
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
		rs->rs_rbm.rgd = begin = ip->i_rgd;
		check_and_update_goal(ip);
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
		skip = gfs2_orlov_skip(ip);
	if (rs->rs_rbm.rgd == NULL)
		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			if (!gfs2_rs_active(rs)) {
				    !fast_to_acquire(rs->rs_rbm.rgd))
				    gfs2_rgrp_used_recently(rs, 1000) &&
				    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
			if (unlikely(error))
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
		/* Skip unusable resource groups */
2036 if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
2038 (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
2041 if (sdp->sd_args.ar_rgrplvb)
2042 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
2044 /* Get a reservation if we don't already have one */
2045 if (!gfs2_rs_active(rs))
2046 rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
2048 /* Skip rgrps when we can't get a reservation on first pass */
2049 if (!gfs2_rs_active(rs) && (loops < 1))
2052 /* If rgrp has enough free space, use it */
2053 if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
2054 (loops == 2 && ap->min_target &&
2055 rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
2056 ip->i_rgd = rs->rs_rbm.rgd;
2057 ap->allowed = ip->i_rgd->rd_free_clone;
2061 /* Check for unlinked inodes which can be reclaimed */
2062 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
2063 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
2066 /* Drop reservation, if we couldn't use reserved rgrp */
2067 if (gfs2_rs_active(rs))
2068 gfs2_rs_deltree(rs);
2070 /* Unlock rgrp if required */
2072 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2074 /* Find the next rgrp, and continue looking */
2075 if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
2080 /* If we've scanned all the rgrps, but found no free blocks
2081 * then this checks for some less likely conditions before
2085 /* Check that fs hasn't grown if writing to rindex */
2086 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2087 error = gfs2_ri_update(ip);
2091 /* Flushing the log may release space */
2093 gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
2100 * gfs2_inplace_release - release an inplace reservation
2101 * @ip: the inode the reservation was taken out on
2103 * Release a reservation made by gfs2_inplace_reserve().
2106 void gfs2_inplace_release(struct gfs2_inode *ip)
2108 struct gfs2_blkreserv *rs = &ip->i_res;
2110 if (gfs2_holder_initialized(&rs->rs_rgd_gh))
2111 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2115 * gfs2_get_block_type - Check a block in a RG is of given type
2116 * @rgd: the resource group holding the block
2117 * @block: the block number
2119 * Returns: The block type (GFS2_BLKST_*)
2122 static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
2124 struct gfs2_rbm rbm = { .rgd = rgd, };
2127 ret = gfs2_rbm_from_block(&rbm, block);
2128 WARN_ON_ONCE(ret != 0);
2130 return gfs2_testbit(&rbm);
2135 * gfs2_alloc_extent - allocate an extent from a given bitmap
2136 * @rbm: the resource group information
2137 * @dinode: TRUE if the first block we allocate is for a dinode
2138 * @n: The extent length (value/result)
2140 * Add the bitmap buffer to the transaction.
2141 * Set the found bits to @new_state to change block's allocation state.
2143 static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
2146 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
2147 const unsigned int elen = *n;
2152 block = gfs2_rbm_to_block(rbm);
2153 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
2154 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
2157 ret = gfs2_rbm_from_block(&pos, block);
2158 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
2160 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
2161 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
2168 * rgblk_free - Change alloc state of given block(s)
2169 * @sdp: the filesystem
2170 * @bstart: the start of a run of blocks to free
2171 * @blen: the length of the block run (all must lie within ONE RG!)
2172 * @new_state: GFS2_BLKST_XXX the after-allocation block state
2174 * Returns: Resource group containing the block(s)
2177 static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
2178 u32 blen, unsigned char new_state)
2180 struct gfs2_rbm rbm;
2181 struct gfs2_bitmap *bi, *bi_prev = NULL;
2183 rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
2185 if (gfs2_consist(sdp))
2186 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
2190 gfs2_rbm_from_block(&rbm, bstart);
2193 if (bi != bi_prev) {
2194 if (!bi->bi_clone) {
2195 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2196 GFP_NOFS | __GFP_NOFAIL);
2197 memcpy(bi->bi_clone + bi->bi_offset,
2198 bi->bi_bh->b_data + bi->bi_offset,
2201 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2204 gfs2_setbit(&rbm, false, new_state);
2205 gfs2_rbm_incr(&rbm);
2212 * gfs2_rgrp_dump - print out an rgrp
2213 * @seq: The iterator
2214 * @gl: The glock in question
2218 void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
2220 struct gfs2_rgrpd *rgd = gl->gl_object;
2221 struct gfs2_blkreserv *trs;
2222 const struct rb_node *n;
2226 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
2227 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
2228 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
2229 rgd->rd_reserved, rgd->rd_extfail_pt);
2230 spin_lock(&rgd->rd_rsspin);
2231 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2232 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2235 spin_unlock(&rgd->rd_rsspin);
2238 static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2240 struct gfs2_sbd *sdp = rgd->rd_sbd;
2241 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
2242 (unsigned long long)rgd->rd_addr);
2243 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2244 gfs2_rgrp_dump(NULL, rgd->rd_gl);
2245 rgd->rd_flags |= GFS2_RDF_ERROR;
2249 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
2250 * @ip: The inode we have just allocated blocks for
2251 * @rbm: The start of the allocated blocks
2252 * @len: The extent length
2254 * Adjusts a reservation after an allocation has taken place. If the
2255 * reservation does not match the allocation, or if it is now empty
2256 * then it is removed.
2259 static void gfs2_adjust_reservation(struct gfs2_inode *ip,
2260 const struct gfs2_rbm *rbm, unsigned len)
2262 struct gfs2_blkreserv *rs = &ip->i_res;
2263 struct gfs2_rgrpd *rgd = rbm->rgd;
2268 spin_lock(&rgd->rd_rsspin);
2269 if (gfs2_rs_active(rs)) {
2270 if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
2271 block = gfs2_rbm_to_block(rbm);
2272 ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
2273 rlen = min(rs->rs_free, len);
2274 rs->rs_free -= rlen;
2275 rgd->rd_reserved -= rlen;
2276 trace_gfs2_rs(rs, TRACE_RS_CLAIM);
2277 if (rs->rs_free && !ret)
2279 /* We used up our block reservation, so we should
2280 reserve more blocks next time. */
2281 atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
2286 spin_unlock(&rgd->rd_rsspin);
2290 * gfs2_set_alloc_start - Set starting point for block allocation
2291 * @rbm: The rbm which will be set to the required location
2292 * @ip: The gfs2 inode
2293 * @dinode: Flag to say if allocation includes a new inode
2295 * This sets the starting point from the reservation if one is active
2296 * otherwise it falls back to guessing a start point based on the
2297 * inode's goal block or the last allocation point in the rgrp.
2300 static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2301 const struct gfs2_inode *ip, bool dinode)
2305 if (gfs2_rs_active(&ip->i_res)) {
2306 *rbm = ip->i_res.rs_rbm;
2310 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2313 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2315 gfs2_rbm_from_block(rbm, goal);
2319 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
2320 * @ip: the inode to allocate the block for
2321 * @bn: Used to return the starting block number
2322 * @nblocks: requested number of blocks/extent length (value/result)
2323 * @dinode: 1 if we're allocating a dinode block, else 0
2324 * @generation: the generation number of the inode
2326 * Returns: 0 or error
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u64 block; /* block, within the file system scope */
	int error;

	gfs2_set_alloc_start(&rbm, ip, dinode);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);

	/* Retry the search, this time ignoring all reservations */
	if (error == -ENOSPC) {
		gfs2_set_alloc_start(&rbm, ip, dinode);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
			rbm.rgd->rd_extfail_pt);
		goto rgrp_error;
	}

	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(&ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_meta(ip->i_gl, dibh);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		pr_warn("nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, *nblocks);

	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
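
/*
 * Usage sketch (editor's example; the helper below is hypothetical, but
 * the call pattern mirrors real callers such as the bmap code): allocate
 * a single data block for an inode.  Assumes the caller has already run
 * gfs2_inplace_reserve() and has an open transaction.  With dinode ==
 * false the generation pointer is never written, so NULL is passed.
 */
static int __maybe_unused example_alloc_data_block(struct gfs2_inode *ip,
						   u64 *blkno)
{
	unsigned int n = 1; /* request one block; returns extent length */

	return gfs2_alloc_blocks(ip, blkno, &n, false, NULL);
}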
/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 */
void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}
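
/*
 * Sketch (editor's example, hypothetical helper): freeing a run of data
 * blocks.  It mirrors gfs2_free_meta() below except that meta == 0; real
 * callers (e.g. the truncate path) batch the statfs and quota updates
 * across several runs rather than doing them per run as shown here.
 */
static void __maybe_unused example_free_data_run(struct gfs2_inode *ip,
						 u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 0);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}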
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 */
void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
}
static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
}
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}
/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type,
 *          -ESTALE if it doesn't match,
 *          or -ve errno if something went wrong while checking
 */
int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	if (gfs2_get_block_type(rgd, no_addr) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}
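
/*
 * Usage sketch (editor's example, hypothetical helper): before reclaiming
 * an on-disk inode, confirm that its block is still marked as an unlinked
 * dinode, as the inode lookup path does.  -ESTALE from this check means
 * another node freed or reused the block in the meantime.
 */
static int __maybe_unused example_verify_unlinked(struct gfs2_sbd *sdp,
						  u64 no_addr)
{
	return gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_UNLINKED);
}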
/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 */
void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
		rgd = ip->i_rgd;
	else
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
		return;
	}

	/* Skip rgrps that are already on the list */
	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}
/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 */
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, state, 0,
				 &rlist->rl_ghs[x]);
}
/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 */
void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}
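
/*
 * Lifecycle sketch (editor's example, hypothetical helper): the usual
 * rlist pattern, as used by the xattr and directory deallocation paths.
 * Collect the rgrps covering the blocks to be freed, lock them all in
 * one batch, do the frees, then drop the locks and the list.
 */
static int __maybe_unused example_rlist_cycle(struct gfs2_inode *ip,
					      const u64 *blocks,
					      unsigned int nblocks)
{
	struct gfs2_rgrp_list rlist;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	for (x = 0; x < nblocks; x++)
		gfs2_rlist_add(ip, &rlist, blocks[x]);

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out;

	/* ... free the blocks under the locked rgrps ... */

	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out:
	gfs2_rlist_free(&rlist);
	return error;
}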