/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
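
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * an opportunistic caller asks politely, while a flush path that must
 * make progress forces the allocation.  btrfs_get_alloc_profile() is the
 * usual source of the flags argument in this kernel:
 *
 *	ret = do_chunk_alloc(trans, extent_root,
 *			     btrfs_get_alloc_profile(root, 0),
 *			     CHUNK_ALLOC_NO_FORCE);
 *	...
 *	ret = do_chunk_alloc(trans, extent_root, alloc_target,
 *			     CHUNK_ALLOC_FORCE);
 */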

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 bytenr,
			      u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
				    u64 ram_bytes, u64 num_bytes, int delalloc);
static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
				     u64 num_bytes, int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);
static int __reserve_metadata_bytes(struct btrfs_root *root,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes);
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
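/*
 * Added note (not in the original): the loop below is the standard
 * kernel rbtree insertion idiom -- walk down from the root remembering
 * which link to take, then rb_link_node() and rb_insert_color() to
 * splice in the new node and rebalance (see Documentation/rbtree.txt).
 */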
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
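/*
 * Worked example (added commentary, not in the original): with block
 * groups [0, 8M) and [8M, 16M), bytenr 10M with contains set returns
 * the [8M, 16M) group.  With contains == 0, bytenr 4M returns the
 * [8M, 16M) group (the first group *starting* at or after 4M), and
 * bytenr 10M returns NULL since no group starts at or after it.
 */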
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE);
}

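/*
 * Added note (not in the original): superblock copies live at fixed
 * logical offsets -- 64KiB for the primary and, on devices large enough,
 * mirrors at 64MiB and 256GiB (see btrfs_sb_offset()).  The helper below
 * adds those ranges to bytes_super and the excluded extents so the
 * allocator never hands them out.
 */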
static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(root->fs_info, cache->key.objectid,
				       bytenr, 0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
				struct btrfs_block_group_cache *block_group)
{
	u64 start = block_group->key.objectid;
	u64 len = block_group->key.offset;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		root->nodesize : root->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, since their free space will be released as soon as
 * the transaction commits.
 */
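/*
 * Worked example (added commentary, not in the original): caching
 * [0, 1M) while [256K, 512K) is still pinned results in two calls to
 * btrfs_add_free_space(), for [0, 256K) and [512K, 1M); the pinned
 * middle only becomes free space once the transaction commits.
 */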
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(extent_root, block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(extent_root, block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(extent_root, block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(fs_info->extent_root, block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and some other thread starts a
	 * transaction commit which tries to do an allocation while the first
	 * thread is still loading the space cache info.  The previous loop
	 * should have kept us from choosing this block group, but if we've
	 * moved to the state where we will wait on caching block groups we
	 * need to first check if we're doing a fast load here, so we can wait
	 * for it to finish, otherwise we could end up allocating from a block
	 * group whose cache gets evicted for one reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(fs_info->extent_root,
						     cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(fs_info->extent_root, cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wake up any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
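/*
 * Usage sketch (added commentary, not in the original): the return value
 * follows btrfs_search_slot(): 0 if the exact extent item exists, > 0 if
 * it does not, < 0 on error.
 *
 *	ret = btrfs_lookup_data_extent(root, start, len);
 *	if (ret < 0)
 *		return ret;		(search failed)
 *	else if (ret > 0)
 *		...			(no extent item at (start, len))
 */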
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be if all of the
 * delayed refs were processed.
 */
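/*
 * Usage sketch (added commentary, not in the original): to ask how many
 * references a tree block would have once pending delayed refs are
 * applied, callers typically pass the block's level with metadata set:
 *
 *	u64 refs, flags;
 *	ret = btrfs_lookup_extent_info(trans, root, eb->start,
 *				       btrfs_header_level(eb), 1,
 *				       &refs, &flags);
 *
 * On filesystems without the skinny-metadata feature the function
 * rewrites offset to nodesize internally, as seen below.
 */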
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs is for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs is generic, and
 * can be used in all cases the implicit back refs is used. The major
 * shortcoming of the full back refs is its overhead. Every time a tree
 * block gets COWed, we have to update back refs entries for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs is used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs is used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs is used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs is used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */
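
/*
 * Worked example (added commentary, not in the original): a data extent
 * at bytenr X written by inode 257 at file offset 0 in subvolume 5 gets
 * an implicit backref item keyed
 *
 *     (X, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * whereas a full (shared) backref from a leaf at bytenr P is keyed
 *
 *     (X, BTRFS_SHARED_DATA_REF_KEY, P)
 *
 * hash() here is hash_extent_data_ref() below.
 */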

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

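/*
 * Added note (not in the original): the combine below shifts high_crc by
 * 31 bits, not 32, so the two CRCs overlap by one bit.  Since the result
 * is used as the on-disk key offset of EXTENT_DATA_REF items, it has to
 * stay exactly as it is for compatibility with existing filesystems.
 */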
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
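		/*
		 * Added note (not in the original): key.offset is only a
		 * hash of (root, objectid, offset), so different refs can
		 * collide.  On -EEXIST, probe forward by bumping the offset
		 * until we either find the matching ref or a free slot.
		 */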
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
1429                                           struct btrfs_path *path,
1430                                           u64 bytenr, u64 parent,
1431                                           u64 root_objectid)
1432 {
1433         struct btrfs_key key;
1434         int ret;
1435
1436         key.objectid = bytenr;
1437         if (parent) {
1438                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1439                 key.offset = parent;
1440         } else {
1441                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1442                 key.offset = root_objectid;
1443         }
1444
1445         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1446         if (ret > 0)
1447                 ret = -ENOENT;
1448 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1449         if (ret == -ENOENT && parent) {
1450                 btrfs_release_path(path);
1451                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1452                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1453                 if (ret > 0)
1454                         ret = -ENOENT;
1455         }
1456 #endif
1457         return ret;
1458 }
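/*
 * Editor's note (not part of the original source): the two key shapes
 * searched for above, for a tree block at 'bytenr':
 *
 *   full back ref:   (bytenr, BTRFS_TREE_BLOCK_REF_KEY,   root_objectid)
 *   shared back ref: (bytenr, BTRFS_SHARED_BLOCK_REF_KEY, parent bytenr)
 *
 * Neither item carries a payload (note the size of 0 passed to
 * btrfs_insert_empty_item() in insert_tree_block_ref() below); the key
 * alone encodes the reference.
 */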
1459
1460 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1461                                           struct btrfs_root *root,
1462                                           struct btrfs_path *path,
1463                                           u64 bytenr, u64 parent,
1464                                           u64 root_objectid)
1465 {
1466         struct btrfs_key key;
1467         int ret;
1468
1469         key.objectid = bytenr;
1470         if (parent) {
1471                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1472                 key.offset = parent;
1473         } else {
1474                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1475                 key.offset = root_objectid;
1476         }
1477
1478         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1479         btrfs_release_path(path);
1480         return ret;
1481 }
1482
1483 static inline int extent_ref_type(u64 parent, u64 owner)
1484 {
1485         int type;
1486         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1487                 if (parent > 0)
1488                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1489                 else
1490                         type = BTRFS_TREE_BLOCK_REF_KEY;
1491         } else {
1492                 if (parent > 0)
1493                         type = BTRFS_SHARED_DATA_REF_KEY;
1494                 else
1495                         type = BTRFS_EXTENT_DATA_REF_KEY;
1496         }
1497         return type;
1498 }
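/*
 * Editor's summary (not part of the original source) of the mapping above:
 *
 *   owner < BTRFS_FIRST_FREE_OBJECTID (tree block), parent set -> SHARED_BLOCK_REF
 *   owner < BTRFS_FIRST_FREE_OBJECTID (tree block), no parent  -> TREE_BLOCK_REF
 *   owner >= BTRFS_FIRST_FREE_OBJECTID (file data), parent set -> SHARED_DATA_REF
 *   owner >= BTRFS_FIRST_FREE_OBJECTID (file data), no parent  -> EXTENT_DATA_REF
 *
 * i.e. metadata and data each get a shared (parent-keyed) and a full
 * (root-keyed) back reference flavour.
 */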
1499
1500 static int find_next_key(struct btrfs_path *path, int level,
1501                          struct btrfs_key *key)
1503 {
1504         for (; level < BTRFS_MAX_LEVEL; level++) {
1505                 if (!path->nodes[level])
1506                         break;
1507                 if (path->slots[level] + 1 >=
1508                     btrfs_header_nritems(path->nodes[level]))
1509                         continue;
1510                 if (level == 0)
1511                         btrfs_item_key_to_cpu(path->nodes[level], key,
1512                                               path->slots[level] + 1);
1513                 else
1514                         btrfs_node_key_to_cpu(path->nodes[level], key,
1515                                               path->slots[level] + 1);
1516                 return 0;
1517         }
1518         return 1;
1519 }
1520
1521 /*
1522  * look for the inline back ref. if the back ref is found, *ref_ret is
1523  * set to the address of the inline back ref, and 0 is returned.
1524  *
1525  * if the back ref isn't found, *ref_ret is set to the address where it
1526  * should be inserted, and -ENOENT is returned.
1527  *
1528  * if insert is true and there are too many inline back refs, the path
1529  * points to the extent item, and -EAGAIN is returned.
1530  *
1531  * NOTE: inline back refs are ordered in the same way that back ref
1532  *       items in the tree are ordered.
1533  */
1534 static noinline_for_stack
1535 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1536                                  struct btrfs_root *root,
1537                                  struct btrfs_path *path,
1538                                  struct btrfs_extent_inline_ref **ref_ret,
1539                                  u64 bytenr, u64 num_bytes,
1540                                  u64 parent, u64 root_objectid,
1541                                  u64 owner, u64 offset, int insert)
1542 {
1543         struct btrfs_key key;
1544         struct extent_buffer *leaf;
1545         struct btrfs_extent_item *ei;
1546         struct btrfs_extent_inline_ref *iref;
1547         u64 flags;
1548         u64 item_size;
1549         unsigned long ptr;
1550         unsigned long end;
1551         int extra_size;
1552         int type;
1553         int want;
1554         int ret;
1555         int err = 0;
1556         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1557                                                  SKINNY_METADATA);
1558
1559         key.objectid = bytenr;
1560         key.type = BTRFS_EXTENT_ITEM_KEY;
1561         key.offset = num_bytes;
1562
1563         want = extent_ref_type(parent, owner);
1564         if (insert) {
1565                 extra_size = btrfs_extent_inline_ref_size(want);
1566                 path->keep_locks = 1;
1567         } else
1568                 extra_size = -1;
1569
1570         /*
1571          * For skinny metadata the key offset is the level of the tree
1572          * block rather than its byte size.
1573          */
1574         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1575                 key.type = BTRFS_METADATA_ITEM_KEY;
1576                 key.offset = owner;
1577         }
1578
1579 again:
1580         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1581         if (ret < 0) {
1582                 err = ret;
1583                 goto out;
1584         }
1585
1586         /*
1587          * We may be a newly converted file system that still has the old fat
1588          * extent entries for metadata, so try to see if we have one of those.
1589          */
1590         if (ret > 0 && skinny_metadata) {
1591                 skinny_metadata = false;
1592                 if (path->slots[0]) {
1593                         path->slots[0]--;
1594                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1595                                               path->slots[0]);
1596                         if (key.objectid == bytenr &&
1597                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1598                             key.offset == num_bytes)
1599                                 ret = 0;
1600                 }
1601                 if (ret) {
1602                         key.objectid = bytenr;
1603                         key.type = BTRFS_EXTENT_ITEM_KEY;
1604                         key.offset = num_bytes;
1605                         btrfs_release_path(path);
1606                         goto again;
1607                 }
1608         }
1609
1610         if (ret && !insert) {
1611                 err = -ENOENT;
1612                 goto out;
1613         } else if (WARN_ON(ret)) {
1614                 err = -EIO;
1615                 goto out;
1616         }
1617
1618         leaf = path->nodes[0];
1619         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1620 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1621         if (item_size < sizeof(*ei)) {
1622                 if (!insert) {
1623                         err = -ENOENT;
1624                         goto out;
1625                 }
1626                 ret = convert_extent_item_v0(trans, root, path, owner,
1627                                              extra_size);
1628                 if (ret < 0) {
1629                         err = ret;
1630                         goto out;
1631                 }
1632                 leaf = path->nodes[0];
1633                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1634         }
1635 #endif
1636         BUG_ON(item_size < sizeof(*ei));
1637
1638         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1639         flags = btrfs_extent_flags(leaf, ei);
1640
1641         ptr = (unsigned long)(ei + 1);
1642         end = (unsigned long)ei + item_size;
1643
1644         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1645                 ptr += sizeof(struct btrfs_tree_block_info);
1646                 BUG_ON(ptr > end);
1647         }
1648
1649         err = -ENOENT;
1650         while (1) {
1651                 if (ptr >= end) {
1652                         WARN_ON(ptr > end);
1653                         break;
1654                 }
1655                 iref = (struct btrfs_extent_inline_ref *)ptr;
1656                 type = btrfs_extent_inline_ref_type(leaf, iref);
1657                 if (want < type)
1658                         break;
1659                 if (want > type) {
1660                         ptr += btrfs_extent_inline_ref_size(type);
1661                         continue;
1662                 }
1663
1664                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1665                         struct btrfs_extent_data_ref *dref;
1666                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1667                         if (match_extent_data_ref(leaf, dref, root_objectid,
1668                                                   owner, offset)) {
1669                                 err = 0;
1670                                 break;
1671                         }
1672                         if (hash_extent_data_ref_item(leaf, dref) <
1673                             hash_extent_data_ref(root_objectid, owner, offset))
1674                                 break;
1675                 } else {
1676                         u64 ref_offset;
1677                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1678                         if (parent > 0) {
1679                                 if (parent == ref_offset) {
1680                                         err = 0;
1681                                         break;
1682                                 }
1683                                 if (ref_offset < parent)
1684                                         break;
1685                         } else {
1686                                 if (root_objectid == ref_offset) {
1687                                         err = 0;
1688                                         break;
1689                                 }
1690                                 if (ref_offset < root_objectid)
1691                                         break;
1692                         }
1693                 }
1694                 ptr += btrfs_extent_inline_ref_size(type);
1695         }
1696         if (err == -ENOENT && insert) {
1697                 if (item_size + extra_size >=
1698                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1699                         err = -EAGAIN;
1700                         goto out;
1701                 }
1702                 /*
1703                  * To add a new inline back ref, we have to make sure
1704                  * there is no corresponding back ref item.
1705                  * For simplicity, we just do not add a new inline back
1706                  * ref if there is any kind of item for this block.
1707                  */
1708                 if (find_next_key(path, 0, &key) == 0 &&
1709                     key.objectid == bytenr &&
1710                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1711                         err = -EAGAIN;
1712                         goto out;
1713                 }
1714         }
1715         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1716 out:
1717         if (insert) {
1718                 path->keep_locks = 0;
1719                 btrfs_unlock_up_safe(path, 1);
1720         }
1721         return err;
1722 }
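/*
 * Editor's sketch (not part of the original source): a minimal consumer of
 * the 0 / -ENOENT / -EAGAIN contract documented above.  The variables are
 * assumed to be set up as in a real caller; see
 * insert_inline_extent_backref() below for the actual in-tree user.
 */
#if 0	/* illustration only, never compiled */
	ret = lookup_inline_extent_backref(trans, root, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		/* found: iref points at the existing inline ref */
		update_inline_extent_backref(root, path, iref, refs_to_add,
					     NULL, NULL);
	} else if (ret == -ENOENT) {
		/* not found: iref points at the insertion offset */
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, NULL);
	} else if (ret == -EAGAIN) {
		/* extent item full: fall back to a separate keyed ref item */
	}
#endif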
1723
1724 /*
1725  * helper to add a new inline back ref
1726  */
1727 static noinline_for_stack
1728 void setup_inline_extent_backref(struct btrfs_root *root,
1729                                  struct btrfs_path *path,
1730                                  struct btrfs_extent_inline_ref *iref,
1731                                  u64 parent, u64 root_objectid,
1732                                  u64 owner, u64 offset, int refs_to_add,
1733                                  struct btrfs_delayed_extent_op *extent_op)
1734 {
1735         struct extent_buffer *leaf;
1736         struct btrfs_extent_item *ei;
1737         unsigned long ptr;
1738         unsigned long end;
1739         unsigned long item_offset;
1740         u64 refs;
1741         int size;
1742         int type;
1743
1744         leaf = path->nodes[0];
1745         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1746         item_offset = (unsigned long)iref - (unsigned long)ei;
1747
1748         type = extent_ref_type(parent, owner);
1749         size = btrfs_extent_inline_ref_size(type);
1750
1751         btrfs_extend_item(root, path, size);
1752
1753         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1754         refs = btrfs_extent_refs(leaf, ei);
1755         refs += refs_to_add;
1756         btrfs_set_extent_refs(leaf, ei, refs);
1757         if (extent_op)
1758                 __run_delayed_extent_op(extent_op, leaf, ei);
1759
1760         ptr = (unsigned long)ei + item_offset;
1761         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1762         if (ptr < end - size)
1763                 memmove_extent_buffer(leaf, ptr + size, ptr,
1764                                       end - size - ptr);
1765
1766         iref = (struct btrfs_extent_inline_ref *)ptr;
1767         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1768         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1769                 struct btrfs_extent_data_ref *dref;
1770                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1771                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1772                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1773                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1774                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1775         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1776                 struct btrfs_shared_data_ref *sref;
1777                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1778                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1779                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1780         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1781                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1782         } else {
1783                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1784         }
1785         btrfs_mark_buffer_dirty(leaf);
1786 }
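/*
 * Editor's diagram (not part of the original source): what the
 * btrfs_extend_item() + memmove above accomplish.  Inline refs live inside
 * the extent item, sorted by type (and by hash/offset within a type).  The
 * item is first grown by 'size' bytes at its end, then the refs that sort
 * after the new one are shifted right to open a gap at 'ptr':
 *
 *   before: [extent item header][ref A][ref B][ref C]
 *   after:  [extent item header][ref A][ gap ][ref B][ref C]
 *                                       ^ptr, new ref written here
 */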
1787
1788 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1789                                  struct btrfs_root *root,
1790                                  struct btrfs_path *path,
1791                                  struct btrfs_extent_inline_ref **ref_ret,
1792                                  u64 bytenr, u64 num_bytes, u64 parent,
1793                                  u64 root_objectid, u64 owner, u64 offset)
1794 {
1795         int ret;
1796
1797         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1798                                            bytenr, num_bytes, parent,
1799                                            root_objectid, owner, offset, 0);
1800         if (ret != -ENOENT)
1801                 return ret;
1802
1803         btrfs_release_path(path);
1804         *ref_ret = NULL;
1805
1806         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1807                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1808                                             root_objectid);
1809         } else {
1810                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1811                                              root_objectid, owner, offset);
1812         }
1813         return ret;
1814 }
1815
1816 /*
1817  * helper to update/remove an inline back ref
1818  */
1819 static noinline_for_stack
1820 void update_inline_extent_backref(struct btrfs_root *root,
1821                                   struct btrfs_path *path,
1822                                   struct btrfs_extent_inline_ref *iref,
1823                                   int refs_to_mod,
1824                                   struct btrfs_delayed_extent_op *extent_op,
1825                                   int *last_ref)
1826 {
1827         struct extent_buffer *leaf;
1828         struct btrfs_extent_item *ei;
1829         struct btrfs_extent_data_ref *dref = NULL;
1830         struct btrfs_shared_data_ref *sref = NULL;
1831         unsigned long ptr;
1832         unsigned long end;
1833         u32 item_size;
1834         int size;
1835         int type;
1836         u64 refs;
1837
1838         leaf = path->nodes[0];
1839         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1840         refs = btrfs_extent_refs(leaf, ei);
1841         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1842         refs += refs_to_mod;
1843         btrfs_set_extent_refs(leaf, ei, refs);
1844         if (extent_op)
1845                 __run_delayed_extent_op(extent_op, leaf, ei);
1846
1847         type = btrfs_extent_inline_ref_type(leaf, iref);
1848
1849         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1850                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1851                 refs = btrfs_extent_data_ref_count(leaf, dref);
1852         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1853                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1854                 refs = btrfs_shared_data_ref_count(leaf, sref);
1855         } else {
1856                 refs = 1;
1857                 BUG_ON(refs_to_mod != -1);
1858         }
1859
1860         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1861         refs += refs_to_mod;
1862
1863         if (refs > 0) {
1864                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1865                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1866                 else
1867                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1868         } else {
1869                 *last_ref = 1;
1870                 size = btrfs_extent_inline_ref_size(type);
1871                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1872                 ptr = (unsigned long)iref;
1873                 end = (unsigned long)ei + item_size;
1874                 if (ptr + size < end)
1875                         memmove_extent_buffer(leaf, ptr, ptr + size,
1876                                               end - ptr - size);
1877                 item_size -= size;
1878                 btrfs_truncate_item(root, path, item_size, 1);
1879         }
1880         btrfs_mark_buffer_dirty(leaf);
1881 }
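/*
 * Editor's diagram (not part of the original source): the removal branch
 * above is the inverse of setup_inline_extent_backref().  When an inline
 * ref's count reaches zero, the refs that follow it are shifted left over
 * it and the item is truncated by the ref's size:
 *
 *   before: [extent item header][ref A][ref B][ref C]
 *   after:  [extent item header][ref A][ref C]
 */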
1882
1883 static noinline_for_stack
1884 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1885                                  struct btrfs_root *root,
1886                                  struct btrfs_path *path,
1887                                  u64 bytenr, u64 num_bytes, u64 parent,
1888                                  u64 root_objectid, u64 owner,
1889                                  u64 offset, int refs_to_add,
1890                                  struct btrfs_delayed_extent_op *extent_op)
1891 {
1892         struct btrfs_extent_inline_ref *iref;
1893         int ret;
1894
1895         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1896                                            bytenr, num_bytes, parent,
1897                                            root_objectid, owner, offset, 1);
1898         if (ret == 0) {
1899                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1900                 update_inline_extent_backref(root, path, iref,
1901                                              refs_to_add, extent_op, NULL);
1902         } else if (ret == -ENOENT) {
1903                 setup_inline_extent_backref(root, path, iref, parent,
1904                                             root_objectid, owner, offset,
1905                                             refs_to_add, extent_op);
1906                 ret = 0;
1907         }
1908         return ret;
1909 }
1910
1911 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1912                                  struct btrfs_root *root,
1913                                  struct btrfs_path *path,
1914                                  u64 bytenr, u64 parent, u64 root_objectid,
1915                                  u64 owner, u64 offset, int refs_to_add)
1916 {
1917         int ret;
1918         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1919                 BUG_ON(refs_to_add != 1);
1920                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1921                                             parent, root_objectid);
1922         } else {
1923                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1924                                              parent, root_objectid,
1925                                              owner, offset, refs_to_add);
1926         }
1927         return ret;
1928 }
1929
1930 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1931                                  struct btrfs_root *root,
1932                                  struct btrfs_path *path,
1933                                  struct btrfs_extent_inline_ref *iref,
1934                                  int refs_to_drop, int is_data, int *last_ref)
1935 {
1936         int ret = 0;
1937
1938         BUG_ON(!is_data && refs_to_drop != 1);
1939         if (iref) {
1940                 update_inline_extent_backref(root, path, iref,
1941                                              -refs_to_drop, NULL, last_ref);
1942         } else if (is_data) {
1943                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1944                                              last_ref);
1945         } else {
1946                 *last_ref = 1;
1947                 ret = btrfs_del_item(trans, root, path);
1948         }
1949         return ret;
1950 }
1951
1952 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
1953 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1954                                u64 *discarded_bytes)
1955 {
1956         int j, ret = 0;
1957         u64 bytes_left, end;
1958         u64 aligned_start = ALIGN(start, 1 << 9);
1959
1960         if (WARN_ON(start != aligned_start)) {
1961                 len -= aligned_start - start;
1962                 len = round_down(len, 1 << 9);
1963                 start = aligned_start;
1964         }
1965
1966         *discarded_bytes = 0;
1967
1968         if (!len)
1969                 return 0;
1970
1971         end = start + len;
1972         bytes_left = len;
1973
1974         /* Skip any superblocks on this device. */
1975         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1976                 u64 sb_start = btrfs_sb_offset(j);
1977                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1978                 u64 size = sb_start - start;
1979
1980                 if (!in_range(sb_start, start, bytes_left) &&
1981                     !in_range(sb_end, start, bytes_left) &&
1982                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1983                         continue;
1984
1985                 /*
1986                  * Superblock spans beginning of range.  Adjust start and
1987                  * try again.
1988                  */
1989                 if (sb_start <= start) {
1990                         start += sb_end - start;
1991                         if (start > end) {
1992                                 bytes_left = 0;
1993                                 break;
1994                         }
1995                         bytes_left = end - start;
1996                         continue;
1997                 }
1998
1999                 if (size) {
2000                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
2001                                                    GFP_NOFS, 0);
2002                         if (!ret)
2003                                 *discarded_bytes += size;
2004                         else if (ret != -EOPNOTSUPP)
2005                                 return ret;
2006                 }
2007
2008                 start = sb_end;
2009                 if (start > end) {
2010                         bytes_left = 0;
2011                         break;
2012                 }
2013                 bytes_left = end - start;
2014         }
2015
2016         if (bytes_left) {
2017                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2018                                            GFP_NOFS, 0);
2019                 if (!ret)
2020                         *discarded_bytes += bytes_left;
2021         }
2022         return ret;
2023 }
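/*
 * Editor's sketch (not part of the original source): the superblock
 * skipping above, reduced to its arithmetic.  For each reserved window
 * [sb_start, sb_end) intersecting [start, end), discard the piece before
 * the window and jump past it; discard whatever remains at the end.
 * discard() is a hypothetical stand-in for blkdev_issue_discard() plus
 * the *discarded_bytes accounting.
 */
#if 0	/* illustration only, never compiled */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;

		if (sb_end <= start || sb_start >= end)
			continue;		/* no overlap */
		if (sb_start > start)
			discard(start, sb_start - start);
		start = min(sb_end, end);	/* skip the superblock */
	}
	if (start < end)
		discard(start, end - start);
#endif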
2024
2025 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2026                          u64 num_bytes, u64 *actual_bytes)
2027 {
2028         int ret;
2029         u64 discarded_bytes = 0;
2030         struct btrfs_bio *bbio = NULL;
2031
2033         /*
2034          * Avoid races with device replace and make sure our bbio has devices
2035          * associated to its stripes that don't go away while we are discarding.
2036          */
2037         btrfs_bio_counter_inc_blocked(root->fs_info);
2038         /* Tell the block device(s) that the sectors can be discarded */
2039         ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
2040                               bytenr, &num_bytes, &bbio, 0);
2041         /* Error condition is -ENOMEM */
2042         if (!ret) {
2043                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2044                 int i;
2045
2047                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2048                         u64 bytes;
2049                         if (!stripe->dev->can_discard)
2050                                 continue;
2051
2052                         ret = btrfs_issue_discard(stripe->dev->bdev,
2053                                                   stripe->physical,
2054                                                   stripe->length,
2055                                                   &bytes);
2056                         if (!ret)
2057                                 discarded_bytes += bytes;
2058                         else if (ret != -EOPNOTSUPP)
2059                                 break; /* Logic errors or -ENOMEM; -EIO is not expected here */
2060
2061                         /*
2062                          * In case we get back -EOPNOTSUPP for some reason,
2063                          * ignore the return value so we don't break
2064                          * callers of discard_extent.
2065                          */
2066                         ret = 0;
2067                 }
2068                 btrfs_put_bbio(bbio);
2069         }
2070         btrfs_bio_counter_dec(root->fs_info);
2071
2072         if (actual_bytes)
2073                 *actual_bytes = discarded_bytes;
2074
2076         if (ret == -EOPNOTSUPP)
2077                 ret = 0;
2078         return ret;
2079 }
2080
2081 /* Can return -ENOMEM */
2082 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2083                          struct btrfs_root *root,
2084                          u64 bytenr, u64 num_bytes, u64 parent,
2085                          u64 root_objectid, u64 owner, u64 offset)
2086 {
2087         int ret;
2088         struct btrfs_fs_info *fs_info = root->fs_info;
2089
2090         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2091                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2092
2093         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2094                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2095                                         num_bytes,
2096                                         parent, root_objectid, (int)owner,
2097                                         BTRFS_ADD_DELAYED_REF, NULL);
2098         } else {
2099                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2100                                         num_bytes, parent, root_objectid,
2101                                         owner, offset, 0,
2102                                         BTRFS_ADD_DELAYED_REF, NULL);
2103         }
2104         return ret;
2105 }
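/*
 * Editor's sketch (not part of the original source): a typical caller,
 * e.g. a clone/reflink style operation that has just made 'root' share an
 * existing data extent.  parent == 0 asks for a full back ref keyed on the
 * root; the hypothetical 'ino' and 'file_offset' identify the new
 * referencing file extent item.
 */
#if 0	/* illustration only, never compiled */
	ret = btrfs_inc_extent_ref(trans, root, extent_bytenr, extent_len,
				   0, root->root_key.objectid,
				   ino, file_offset);
	if (ret)	/* only -ENOMEM is expected, see above */
		btrfs_abort_transaction(trans, ret);
#endif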
2106
2107 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2108                                   struct btrfs_root *root,
2109                                   struct btrfs_delayed_ref_node *node,
2110                                   u64 parent, u64 root_objectid,
2111                                   u64 owner, u64 offset, int refs_to_add,
2112                                   struct btrfs_delayed_extent_op *extent_op)
2113 {
2114         struct btrfs_fs_info *fs_info = root->fs_info;
2115         struct btrfs_path *path;
2116         struct extent_buffer *leaf;
2117         struct btrfs_extent_item *item;
2118         struct btrfs_key key;
2119         u64 bytenr = node->bytenr;
2120         u64 num_bytes = node->num_bytes;
2121         u64 refs;
2122         int ret;
2123
2124         path = btrfs_alloc_path();
2125         if (!path)
2126                 return -ENOMEM;
2127
2128         path->reada = READA_FORWARD;
2129         path->leave_spinning = 1;
2130         /* this will set up the path even if it fails to insert the back ref */
2131         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2132                                            bytenr, num_bytes, parent,
2133                                            root_objectid, owner, offset,
2134                                            refs_to_add, extent_op);
2135         if ((ret < 0 && ret != -EAGAIN) || !ret)
2136                 goto out;
2137
2138         /*
2139          * Ok, we had -EAGAIN, which means we didn't have space to insert an
2140          * inline extent ref, so just update the reference count and add a
2141          * normal backref.
2142          */
2143         leaf = path->nodes[0];
2144         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2145         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2146         refs = btrfs_extent_refs(leaf, item);
2147         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2148         if (extent_op)
2149                 __run_delayed_extent_op(extent_op, leaf, item);
2150
2151         btrfs_mark_buffer_dirty(leaf);
2152         btrfs_release_path(path);
2153
2154         path->reada = READA_FORWARD;
2155         path->leave_spinning = 1;
2156         /* now insert the actual backref */
2157         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2158                                     path, bytenr, parent, root_objectid,
2159                                     owner, offset, refs_to_add);
2160         if (ret)
2161                 btrfs_abort_transaction(trans, ret);
2162 out:
2163         btrfs_free_path(path);
2164         return ret;
2165 }
2166
2167 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2168                                 struct btrfs_root *root,
2169                                 struct btrfs_delayed_ref_node *node,
2170                                 struct btrfs_delayed_extent_op *extent_op,
2171                                 int insert_reserved)
2172 {
2173         int ret = 0;
2174         struct btrfs_delayed_data_ref *ref;
2175         struct btrfs_key ins;
2176         u64 parent = 0;
2177         u64 ref_root = 0;
2178         u64 flags = 0;
2179
2180         ins.objectid = node->bytenr;
2181         ins.offset = node->num_bytes;
2182         ins.type = BTRFS_EXTENT_ITEM_KEY;
2183
2184         ref = btrfs_delayed_node_to_data_ref(node);
2185         trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);
2186
2187         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2188                 parent = ref->parent;
2189         ref_root = ref->root;
2190
2191         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2192                 if (extent_op)
2193                         flags |= extent_op->flags_to_set;
2194                 ret = alloc_reserved_file_extent(trans, root,
2195                                                  parent, ref_root, flags,
2196                                                  ref->objectid, ref->offset,
2197                                                  &ins, node->ref_mod);
2198         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2199                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2200                                              ref_root, ref->objectid,
2201                                              ref->offset, node->ref_mod,
2202                                              extent_op);
2203         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2204                 ret = __btrfs_free_extent(trans, root, node, parent,
2205                                           ref_root, ref->objectid,
2206                                           ref->offset, node->ref_mod,
2207                                           extent_op);
2208         } else {
2209                 BUG();
2210         }
2211         return ret;
2212 }
2213
2214 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2215                                     struct extent_buffer *leaf,
2216                                     struct btrfs_extent_item *ei)
2217 {
2218         u64 flags = btrfs_extent_flags(leaf, ei);
2219         if (extent_op->update_flags) {
2220                 flags |= extent_op->flags_to_set;
2221                 btrfs_set_extent_flags(leaf, ei, flags);
2222         }
2223
2224         if (extent_op->update_key) {
2225                 struct btrfs_tree_block_info *bi;
2226                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2227                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2228                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2229         }
2230 }
2231
2232 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2233                                  struct btrfs_root *root,
2234                                  struct btrfs_delayed_ref_node *node,
2235                                  struct btrfs_delayed_extent_op *extent_op)
2236 {
2237         struct btrfs_key key;
2238         struct btrfs_path *path;
2239         struct btrfs_extent_item *ei;
2240         struct extent_buffer *leaf;
2241         u32 item_size;
2242         int ret;
2243         int err = 0;
2244         int metadata = !extent_op->is_data;
2245
2246         if (trans->aborted)
2247                 return 0;
2248
2249         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2250                 metadata = 0;
2251
2252         path = btrfs_alloc_path();
2253         if (!path)
2254                 return -ENOMEM;
2255
2256         key.objectid = node->bytenr;
2257
2258         if (metadata) {
2259                 key.type = BTRFS_METADATA_ITEM_KEY;
2260                 key.offset = extent_op->level;
2261         } else {
2262                 key.type = BTRFS_EXTENT_ITEM_KEY;
2263                 key.offset = node->num_bytes;
2264         }
2265
2266 again:
2267         path->reada = READA_FORWARD;
2268         path->leave_spinning = 1;
2269         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2270                                 path, 0, 1);
2271         if (ret < 0) {
2272                 err = ret;
2273                 goto out;
2274         }
2275         if (ret > 0) {
2276                 if (metadata) {
2277                         if (path->slots[0] > 0) {
2278                                 path->slots[0]--;
2279                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2280                                                       path->slots[0]);
2281                                 if (key.objectid == node->bytenr &&
2282                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2283                                     key.offset == node->num_bytes)
2284                                         ret = 0;
2285                         }
2286                         if (ret > 0) {
2287                                 btrfs_release_path(path);
2288                                 metadata = 0;
2289
2290                                 key.objectid = node->bytenr;
2291                                 key.offset = node->num_bytes;
2292                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2293                                 goto again;
2294                         }
2295                 } else {
2296                         err = -EIO;
2297                         goto out;
2298                 }
2299         }
2300
2301         leaf = path->nodes[0];
2302         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2303 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2304         if (item_size < sizeof(*ei)) {
2305                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2306                                              path, (u64)-1, 0);
2307                 if (ret < 0) {
2308                         err = ret;
2309                         goto out;
2310                 }
2311                 leaf = path->nodes[0];
2312                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2313         }
2314 #endif
2315         BUG_ON(item_size < sizeof(*ei));
2316         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2317         __run_delayed_extent_op(extent_op, leaf, ei);
2318
2319         btrfs_mark_buffer_dirty(leaf);
2320 out:
2321         btrfs_free_path(path);
2322         return err;
2323 }
2324
2325 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2326                                 struct btrfs_root *root,
2327                                 struct btrfs_delayed_ref_node *node,
2328                                 struct btrfs_delayed_extent_op *extent_op,
2329                                 int insert_reserved)
2330 {
2331         int ret = 0;
2332         struct btrfs_delayed_tree_ref *ref;
2333         struct btrfs_key ins;
2334         u64 parent = 0;
2335         u64 ref_root = 0;
2336         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2337                                                  SKINNY_METADATA);
2338
2339         ref = btrfs_delayed_node_to_tree_ref(node);
2340         trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);
2341
2342         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2343                 parent = ref->parent;
2344         ref_root = ref->root;
2345
2346         ins.objectid = node->bytenr;
2347         if (skinny_metadata) {
2348                 ins.offset = ref->level;
2349                 ins.type = BTRFS_METADATA_ITEM_KEY;
2350         } else {
2351                 ins.offset = node->num_bytes;
2352                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2353         }
2354
2355         if (node->ref_mod != 1) {
2356                 btrfs_err(root->fs_info,
2357         "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
2358                           node->bytenr, node->ref_mod, node->action, ref_root,
2359                           parent);
2360                 return -EIO;
2361         }
2362         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2363                 BUG_ON(!extent_op || !extent_op->update_flags);
2364                 ret = alloc_reserved_tree_block(trans, root,
2365                                                 parent, ref_root,
2366                                                 extent_op->flags_to_set,
2367                                                 &extent_op->key,
2368                                                 ref->level, &ins);
2369         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2370                 ret = __btrfs_inc_extent_ref(trans, root, node,
2371                                              parent, ref_root,
2372                                              ref->level, 0, 1,
2373                                              extent_op);
2374         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2375                 ret = __btrfs_free_extent(trans, root, node,
2376                                           parent, ref_root,
2377                                           ref->level, 0, 1, extent_op);
2378         } else {
2379                 BUG();
2380         }
2381         return ret;
2382 }
2383
2384 /* helper function to actually process a single delayed ref entry */
2385 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2386                                struct btrfs_root *root,
2387                                struct btrfs_delayed_ref_node *node,
2388                                struct btrfs_delayed_extent_op *extent_op,
2389                                int insert_reserved)
2390 {
2391         int ret = 0;
2392
2393         if (trans->aborted) {
2394                 if (insert_reserved)
2395                         btrfs_pin_extent(root, node->bytenr,
2396                                          node->num_bytes, 1);
2397                 return 0;
2398         }
2399
2400         if (btrfs_delayed_ref_is_head(node)) {
2401                 struct btrfs_delayed_ref_head *head;
2402                 /*
2403                  * we've hit the end of the chain and we were supposed
2404                  * to insert this extent into the tree.  But it got
2405                  * deleted before we ever needed to insert it, so all
2406                  * we have to do is clean up the accounting.
2407                  */
2408                 BUG_ON(extent_op);
2409                 head = btrfs_delayed_node_to_head(node);
2410                 trace_run_delayed_ref_head(root->fs_info, node, head,
2411                                            node->action);
2412
2413                 if (insert_reserved) {
2414                         btrfs_pin_extent(root, node->bytenr,
2415                                          node->num_bytes, 1);
2416                         if (head->is_data) {
2417                                 ret = btrfs_del_csums(trans, root,
2418                                                       node->bytenr,
2419                                                       node->num_bytes);
2420                         }
2421                 }
2422
2423                 /* Also free its reserved qgroup space */
2424                 btrfs_qgroup_free_delayed_ref(root->fs_info,
2425                                               head->qgroup_ref_root,
2426                                               head->qgroup_reserved);
2427                 return ret;
2428         }
2429
2430         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2431             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2432                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2433                                            insert_reserved);
2434         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2435                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2436                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2437                                            insert_reserved);
2438         else
2439                 BUG();
2440         return ret;
2441 }
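/*
 * Editor's summary (not part of the original source): the dispatch above.
 * Head nodes only settle accounting (pin the extent, drop csums and the
 * qgroup reservation) for extents that were freed before ever being
 * inserted; TREE_BLOCK/SHARED_BLOCK refs go to run_delayed_tree_ref() and
 * EXTENT_DATA/SHARED_DATA refs to run_delayed_data_ref().
 */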
2442
2443 static inline struct btrfs_delayed_ref_node *
2444 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2445 {
2446         struct btrfs_delayed_ref_node *ref;
2447
2448         if (list_empty(&head->ref_list))
2449                 return NULL;
2450
2451         /*
2452          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2453          * This is to prevent a ref count from going down to zero, which deletes
2454          * the extent item from the extent tree, when there still are references
2455          * to add, which would fail because they would not find the extent item.
2456          */
2457         list_for_each_entry(ref, &head->ref_list, list) {
2458                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2459                         return ref;
2460         }
2461
2462         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2463                           list);
2464 }
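/*
 * Editor's example (not part of the original source): why ADD refs run
 * first.  With a pending list [DROP, ADD] against an extent whose current
 * count is 1, running the DROP first would delete the extent item, and
 * the following ADD would fail to find it.  Running the ADD first takes
 * the count to 2, then the DROP returns it to 1, with the item alive
 * throughout.
 */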
2465
2466 /*
2467  * Returns 0 on success or if called with an already aborted transaction.
2468  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2469  */
2470 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2471                                              struct btrfs_root *root,
2472                                              unsigned long nr)
2473 {
2474         struct btrfs_delayed_ref_root *delayed_refs;
2475         struct btrfs_delayed_ref_node *ref;
2476         struct btrfs_delayed_ref_head *locked_ref = NULL;
2477         struct btrfs_delayed_extent_op *extent_op;
2478         struct btrfs_fs_info *fs_info = root->fs_info;
2479         ktime_t start = ktime_get();
2480         int ret;
2481         unsigned long count = 0;
2482         unsigned long actual_count = 0;
2483         int must_insert_reserved = 0;
2484
2485         delayed_refs = &trans->transaction->delayed_refs;
2486         while (1) {
2487                 if (!locked_ref) {
2488                         if (count >= nr)
2489                                 break;
2490
2491                         spin_lock(&delayed_refs->lock);
2492                         locked_ref = btrfs_select_ref_head(trans);
2493                         if (!locked_ref) {
2494                                 spin_unlock(&delayed_refs->lock);
2495                                 break;
2496                         }
2497
2498                         /* grab the lock that says we are going to process
2499                          * all the refs for this head */
2500                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2501                         spin_unlock(&delayed_refs->lock);
2502                         /*
2503                          * we may have dropped the spin lock to get the head
2504                          * mutex lock, and that might have given someone else
2505                          * time to free the head.  If that's true, it has been
2506                          * removed from our list and we can move on.
2507                          */
2508                         if (ret == -EAGAIN) {
2509                                 locked_ref = NULL;
2510                                 count++;
2511                                 continue;
2512                         }
2513                 }
2514
2515                 /*
2516                  * We need to try to merge add/drops of the same ref, since we
2517                  * can run into issues with relocate dropping the implicit ref
2518                  * and then it being added back again before the drop can
2519                  * finish.  If we merged anything, we need to re-loop so we can
2520                  * get a good ref.
2521                  * Or we can get node references of the same type that weren't
2522                  * merged when created due to bumps in the tree mod seq, and
2523                  * we need to merge them to prevent adding an inline extent
2524                  * backref before dropping it (triggering a BUG_ON at
2525                  * insert_inline_extent_backref()).
2526                  */
2527                 spin_lock(&locked_ref->lock);
2528                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2529                                          locked_ref);
2530
2531                 /*
2532                  * locked_ref is the head node, so we have to go one
2533                  * node back for any delayed ref updates
2534                  */
2535                 ref = select_delayed_ref(locked_ref);
2536
2537                 if (ref && ref->seq &&
2538                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2539                         spin_unlock(&locked_ref->lock);
2540                         spin_lock(&delayed_refs->lock);
2541                         locked_ref->processing = 0;
2542                         delayed_refs->num_heads_ready++;
2543                         spin_unlock(&delayed_refs->lock);
2544                         btrfs_delayed_ref_unlock(locked_ref);
2545                         locked_ref = NULL;
2546                         cond_resched();
2547                         count++;
2548                         continue;
2549                 }
2550
2551                 /*
2552                  * record the must_insert_reserved flag before we
2553                  * drop the spin lock.
2554                  */
2555                 must_insert_reserved = locked_ref->must_insert_reserved;
2556                 locked_ref->must_insert_reserved = 0;
2557
2558                 extent_op = locked_ref->extent_op;
2559                 locked_ref->extent_op = NULL;
2560
2561                 if (!ref) {
2564                         /* All delayed refs have been processed, go ahead
2565                          * and send the head node to run_one_delayed_ref,
2566                          * so that any accounting fixes can happen.
2567                          */
2568                         ref = &locked_ref->node;
2569
2570                         if (extent_op && must_insert_reserved) {
2571                                 btrfs_free_delayed_extent_op(extent_op);
2572                                 extent_op = NULL;
2573                         }
2574
2575                         if (extent_op) {
2576                                 spin_unlock(&locked_ref->lock);
2577                                 ret = run_delayed_extent_op(trans, root,
2578                                                             ref, extent_op);
2579                                 btrfs_free_delayed_extent_op(extent_op);
2580
2581                                 if (ret) {
2582                                         /*
2583                                          * Need to reset must_insert_reserved if
2584                                          * there was an error so the abort stuff
2585                                          * can cleanup the reserved space
2586                                          * properly.
2587                                          */
2588                                         if (must_insert_reserved)
2589                                                 locked_ref->must_insert_reserved = 1;
2590                                         spin_lock(&delayed_refs->lock);
2591                                         locked_ref->processing = 0;
2592                                         delayed_refs->num_heads_ready++;
2593                                         spin_unlock(&delayed_refs->lock);
2594                                         btrfs_debug(fs_info,
2595                                                     "run_delayed_extent_op returned %d",
2596                                                     ret);
2597                                         btrfs_delayed_ref_unlock(locked_ref);
2598                                         return ret;
2599                                 }
2600                                 continue;
2601                         }
2602
2603                         /*
2604                          * Need to drop our head ref lock and re-acquire the
2605                          * delayed ref lock and then re-check to make sure
2606                          * nobody got added.
2607                          */
2608                         spin_unlock(&locked_ref->lock);
2609                         spin_lock(&delayed_refs->lock);
2610                         spin_lock(&locked_ref->lock);
2611                         if (!list_empty(&locked_ref->ref_list) ||
2612                             locked_ref->extent_op) {
2613                                 spin_unlock(&locked_ref->lock);
2614                                 spin_unlock(&delayed_refs->lock);
2615                                 continue;
2616                         }
2617                         ref->in_tree = 0;
2618                         delayed_refs->num_heads--;
2619                         rb_erase(&locked_ref->href_node,
2620                                  &delayed_refs->href_root);
2621                         spin_unlock(&delayed_refs->lock);
2622                 } else {
2623                         actual_count++;
2624                         ref->in_tree = 0;
2625                         list_del(&ref->list);
2626                 }
2627                 atomic_dec(&delayed_refs->num_entries);
2628
2629                 if (!btrfs_delayed_ref_is_head(ref)) {
2630                         /*
2631                          * when we play the delayed ref, also correct the
2632                          * ref_mod on head
2633                          */
2634                         switch (ref->action) {
2635                         case BTRFS_ADD_DELAYED_REF:
2636                         case BTRFS_ADD_DELAYED_EXTENT:
2637                                 locked_ref->node.ref_mod -= ref->ref_mod;
2638                                 break;
2639                         case BTRFS_DROP_DELAYED_REF:
2640                                 locked_ref->node.ref_mod += ref->ref_mod;
2641                                 break;
2642                         default:
2643                                 WARN_ON(1);
2644                         }
2645                 }
2646                 spin_unlock(&locked_ref->lock);
2647
2648                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2649                                           must_insert_reserved);
2650
2651                 btrfs_free_delayed_extent_op(extent_op);
2652                 if (ret) {
2653                         spin_lock(&delayed_refs->lock);
2654                         locked_ref->processing = 0;
2655                         delayed_refs->num_heads_ready++;
2656                         spin_unlock(&delayed_refs->lock);
2657                         btrfs_delayed_ref_unlock(locked_ref);
2658                         btrfs_put_delayed_ref(ref);
2659                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
2660                                     ret);
2661                         return ret;
2662                 }
2663
2664                 /*
2665                  * If this node is a head, that means all the refs in this head
2666                  * have been dealt with, and we will pick the next head to deal
2667                  * with, so we must unlock the head and drop it from the cluster
2668                  * list before we release it.
2669                  */
2670                 if (btrfs_delayed_ref_is_head(ref)) {
2671                         if (locked_ref->is_data &&
2672                             locked_ref->total_ref_mod < 0) {
2673                                 spin_lock(&delayed_refs->lock);
2674                                 delayed_refs->pending_csums -= ref->num_bytes;
2675                                 spin_unlock(&delayed_refs->lock);
2676                         }
2677                         btrfs_delayed_ref_unlock(locked_ref);
2678                         locked_ref = NULL;
2679                 }
2680                 btrfs_put_delayed_ref(ref);
2681                 count++;
2682                 cond_resched();
2683         }
2684
2685         /*
2686          * We don't want to count ref heads: we can have empty ref heads, and
2687          * those would drastically skew the average downwards, since for them we
2688          * only do accounting, no actual extent tree updates.
2689          */
2690         if (actual_count > 0) {
2691                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2692                 u64 avg;
2693
2694                 /*
2695                  * We weigh the current average higher than our current runtime
2696                  * to avoid large swings in the average.
2697                  */
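                /*
                 * Concretely, this is an exponential moving average with a
                 * smoothing factor of 1/4:
                 *
                 *   avg_new = (3 * avg_old + runtime) / 4
                 */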
2698                 spin_lock(&delayed_refs->lock);
2699                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2700                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2701                 spin_unlock(&delayed_refs->lock);
2702         }
2703         return 0;
2704 }
2705
2706 #ifdef SCRAMBLE_DELAYED_REFS
2707 /*
2708  * Normally delayed refs get processed in ascending bytenr order. This
2709  * correlates in most cases to the order added. To expose dependencies on this
2710  * order, we start to process the tree in the middle instead of the beginning
2711  */
2712 static u64 find_middle(struct rb_root *root)
2713 {
2714         struct rb_node *n = root->rb_node;
2715         struct btrfs_delayed_ref_node *entry;
2716         int alt = 1;
2717         u64 middle = 0; /* returned unchanged if the tree is empty */
2718         u64 first = 0, last = 0;
2719
2720         n = rb_first(root);
2721         if (n) {
2722                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2723                 first = entry->bytenr;
2724         }
2725         n = rb_last(root);
2726         if (n) {
2727                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2728                 last = entry->bytenr;
2729         }
2730         n = root->rb_node;
2731
2732         while (n) {
2733                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2734                 WARN_ON(!entry->in_tree);
2735
2736                 middle = entry->bytenr;
2737
2738                 if (alt)
2739                         n = n->rb_left;
2740                 else
2741                         n = n->rb_right;
2742
2743                 alt = 1 - alt;
2744         }
2745         return middle;
2746 }
2747 #endif
2748
2749 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2750 {
2751         u64 num_bytes;
2752
2753         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2754                              sizeof(struct btrfs_extent_inline_ref));
2755         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2756                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2757
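        /*
         * Rough worked example, with sizes quoted only for illustration:
         * with SKINNY_METADATA each head costs sizeof(btrfs_extent_item) +
         * sizeof(btrfs_extent_inline_ref) = 24 + 9 = 33 bytes, so a 16KiB
         * leaf holds a few hundred heads.
         */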
2758         /*
2759          * We don't ever fill up leaves all the way; the caller compensates by
2760          * doubling its estimate (see btrfs_check_space_for_delayed_refs).
2761          */
2762         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2763 }
2764
2765 /*
2766  * Takes the number of bytes to be checksummed and figures out how many leaves it
2767  * would require to store the csums for that many bytes.
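 *
 * As a rough illustration (assuming 4KiB sectors and 4-byte crc32c
 * checksums): each leaf stores about BTRFS_MAX_ITEM_SIZE / 4 csums, so a
 * single leaf covers on the order of 16MiB of data.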
2768  */
2769 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2770 {
2771         u64 csum_size;
2772         u64 num_csums_per_leaf;
2773         u64 num_csums;
2774
2775         csum_size = BTRFS_MAX_ITEM_SIZE(root);
2776         num_csums_per_leaf = div64_u64(csum_size,
2777                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2778         num_csums = div64_u64(csum_bytes, root->sectorsize);
2779         num_csums += num_csums_per_leaf - 1;
2780         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2781         return num_csums;
2782 }
2783
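/*
 * Conservatively estimate how much metadata space running the currently
 * ready delayed ref heads could consume, and compare that against the
 * global block reserve.  Returns 1 if the reserve looks too small to run
 * them safely, 0 otherwise.
 */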
2784 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2785                                        struct btrfs_root *root)
2786 {
2787         struct btrfs_block_rsv *global_rsv;
2788         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2789         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2790         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2791         u64 num_bytes, num_dirty_bgs_bytes;
2792         int ret = 0;
2793
2794         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2795         num_heads = heads_to_leaves(root, num_heads);
2796         if (num_heads > 1)
2797                 num_bytes += (num_heads - 1) * root->nodesize;
2798         num_bytes <<= 1;
2799         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2800         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2801                                                              num_dirty_bgs);
2802         global_rsv = &root->fs_info->global_block_rsv;
2803
2804         /*
2805          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2806          * wiggle room since running delayed refs can create more delayed refs.
2807          */
2808         if (global_rsv->space_info->full) {
2809                 num_dirty_bgs_bytes <<= 1;
2810                 num_bytes <<= 1;
2811         }
2812
2813         spin_lock(&global_rsv->lock);
2814         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2815                 ret = 1;
2816         spin_unlock(&global_rsv->lock);
2817         return ret;
2818 }
2819
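/*
 * Decide whether the caller should throttle and help run delayed refs:
 * returns 1 when the estimated time to run all queued entries is at least
 * one second, 2 when it is at least half a second, and otherwise falls
 * back to the global reserve check in btrfs_check_space_for_delayed_refs.
 */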
2820 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2821                                        struct btrfs_root *root)
2822 {
2823         struct btrfs_fs_info *fs_info = root->fs_info;
2824         u64 num_entries =
2825                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2826         u64 avg_runtime;
2827         u64 val;
2828
2829         smp_mb();
2830         avg_runtime = fs_info->avg_delayed_ref_runtime;
2831         val = num_entries * avg_runtime;
2832         if (val >= NSEC_PER_SEC)
2833                 return 1;
2834         if (val >= NSEC_PER_SEC / 2)
2835                 return 2;
2836
2837         return btrfs_check_space_for_delayed_refs(trans, root);
2838 }
2839
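/*
 * Work item for running delayed refs from the extent_workers workqueue.
 * When @sync is set, the submitter blocks on @wait and frees the struct;
 * otherwise the worker frees it itself once it is done.
 */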
2840 struct async_delayed_refs {
2841         struct btrfs_root *root;
2842         u64 transid;
2843         int count;
2844         int error;
2845         int sync;
2846         struct completion wait;
2847         struct btrfs_work work;
2848 };
2849
2850 static void delayed_ref_async_start(struct btrfs_work *work)
2851 {
2852         struct async_delayed_refs *async;
2853         struct btrfs_trans_handle *trans;
2854         int ret;
2855
2856         async = container_of(work, struct async_delayed_refs, work);
2857
2858         /* if the commit is already started, we don't need to wait here */
2859         if (btrfs_transaction_blocked(async->root->fs_info))
2860                 goto done;
2861
2862         trans = btrfs_join_transaction(async->root);
2863         if (IS_ERR(trans)) {
2864                 async->error = PTR_ERR(trans);
2865                 goto done;
2866         }
2867
2868         /*
2869          * trans->sync means that when we call end_transaction, we won't
2870          * wait on delayed refs
2871          */
2872         trans->sync = true;
2873
2874         /* Don't bother flushing if we got into a different transaction */
2875         if (trans->transid > async->transid)
2876                 goto end;
2877
2878         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2879         if (ret)
2880                 async->error = ret;
2881 end:
2882         ret = btrfs_end_transaction(trans, async->root);
2883         if (ret && !async->error)
2884                 async->error = ret;
2885 done:
2886         if (async->sync)
2887                 complete(&async->wait);
2888         else
2889                 kfree(async);
2890 }
2891
2892 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2893                                  unsigned long count, u64 transid, int wait)
2894 {
2895         struct async_delayed_refs *async;
2896         int ret;
2897
2898         async = kmalloc(sizeof(*async), GFP_NOFS);
2899         if (!async)
2900                 return -ENOMEM;
2901
2902         async->root = root->fs_info->tree_root;
2903         async->count = count;
2904         async->error = 0;
2905         async->transid = transid;
2906         async->sync = wait ? 1 : 0;
2910         init_completion(&async->wait);
2911
2912         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2913                         delayed_ref_async_start, NULL, NULL);
2914
2915         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2916
2917         if (wait) {
2918                 wait_for_completion(&async->wait);
2919                 ret = async->error;
2920                 kfree(async);
2921                 return ret;
2922         }
2923         return 0;
2924 }
2925
2926 /*
2927  * this starts processing the delayed reference count updates and
2928  * extent insertions we have queued up so far.  count can be
2929  * 0, which means to process everything in the tree at the start
2930  * of the run (but not newly added entries), or it can be some target
2931  * number you'd like to process.
2932  *
2933  * Returns 0 on success or if called with an aborted transaction
2934  * Returns <0 on error and aborts the transaction
2935  */
2936 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2937                            struct btrfs_root *root, unsigned long count)
2938 {
2939         struct rb_node *node;
2940         struct btrfs_delayed_ref_root *delayed_refs;
2941         struct btrfs_delayed_ref_head *head;
2942         int ret;
2943         int run_all = count == (unsigned long)-1;
2944         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2945
2946         /* We'll clean this up in btrfs_cleanup_transaction */
2947         if (trans->aborted)
2948                 return 0;
2949
2950         if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
2951                 return 0;
2952
2953         if (root == root->fs_info->extent_root)
2954                 root = root->fs_info->tree_root;
2955
2956         delayed_refs = &trans->transaction->delayed_refs;
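        /*
         * A count of 0 means "process everything queued right now"; the
         * doubling below is presumably headroom for refs that running the
         * first batch queues up in turn.
         */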
2957         if (count == 0)
2958                 count = atomic_read(&delayed_refs->num_entries) * 2;
2959
2960 again:
2961 #ifdef SCRAMBLE_DELAYED_REFS
2962         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2963 #endif
2964         trans->can_flush_pending_bgs = false;
2965         ret = __btrfs_run_delayed_refs(trans, root, count);
2966         if (ret < 0) {
2967                 btrfs_abort_transaction(trans, ret);
2968                 return ret;
2969         }
2970
2971         if (run_all) {
2972                 if (!list_empty(&trans->new_bgs))
2973                         btrfs_create_pending_block_groups(trans, root);
2974
2975                 spin_lock(&delayed_refs->lock);
2976                 node = rb_first(&delayed_refs->href_root);
2977                 if (!node) {
2978                         spin_unlock(&delayed_refs->lock);
2979                         goto out;
2980                 }
2981
2982                 while (node) {
2983                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2984                                         href_node);
2985                         if (btrfs_delayed_ref_is_head(&head->node)) {
2986                                 struct btrfs_delayed_ref_node *ref;
2987
2988                                 ref = &head->node;
2989                                 atomic_inc(&ref->refs);
2990
2991                                 spin_unlock(&delayed_refs->lock);
2992                                 /*
2993                                  * Mutex was contended, block until it's
2994                                  * released and try again
2995                                  */
2996                                 mutex_lock(&head->mutex);
2997                                 mutex_unlock(&head->mutex);
2998
2999                                 btrfs_put_delayed_ref(ref);
3000                                 cond_resched();
3001                                 goto again;
3002                         } else {
3003                                 WARN_ON(1);
3004                         }
3005                         node = rb_next(node);
3006                 }
3007                 spin_unlock(&delayed_refs->lock);
3008                 cond_resched();
3009                 goto again;
3010         }
3011 out:
3012         assert_qgroups_uptodate(trans);
3013         trans->can_flush_pending_bgs = can_flush_pending_bgs;
3014         return 0;
3015 }
3016
3017 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
3018                                 struct btrfs_root *root,
3019                                 u64 bytenr, u64 num_bytes, u64 flags,
3020                                 int level, int is_data)
3021 {
3022         struct btrfs_delayed_extent_op *extent_op;
3023         int ret;
3024
3025         extent_op = btrfs_alloc_delayed_extent_op();
3026         if (!extent_op)
3027                 return -ENOMEM;
3028
3029         extent_op->flags_to_set = flags;
3030         extent_op->update_flags = true;
3031         extent_op->update_key = false;
3032         extent_op->is_data = is_data ? true : false;
3033         extent_op->level = level;
3034
3035         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
3036                                           num_bytes, extent_op);
3037         if (ret)
3038                 btrfs_free_delayed_extent_op(extent_op);
3039         return ret;
3040 }
3041
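/*
 * Look through the in-memory delayed refs for @bytenr.  Returns 1 if a ref
 * belonging to another root/inode/offset exists (i.e. a cross reference),
 * 0 if there is none, and -EAGAIN if the head mutex was contended and the
 * caller should retry.
 */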
3042 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3043                                       struct btrfs_root *root,
3044                                       struct btrfs_path *path,
3045                                       u64 objectid, u64 offset, u64 bytenr)
3046 {
3047         struct btrfs_delayed_ref_head *head;
3048         struct btrfs_delayed_ref_node *ref;
3049         struct btrfs_delayed_data_ref *data_ref;
3050         struct btrfs_delayed_ref_root *delayed_refs;
3051         int ret = 0;
3052
3053         delayed_refs = &trans->transaction->delayed_refs;
3054         spin_lock(&delayed_refs->lock);
3055         head = btrfs_find_delayed_ref_head(trans, bytenr);
3056         if (!head) {
3057                 spin_unlock(&delayed_refs->lock);
3058                 return 0;
3059         }
3060
3061         if (!mutex_trylock(&head->mutex)) {
3062                 atomic_inc(&head->node.refs);
3063                 spin_unlock(&delayed_refs->lock);
3064
3065                 btrfs_release_path(path);
3066
3067                 /*
3068                  * Mutex was contended, block until it's released and let
3069                  * caller try again
3070                  */
3071                 mutex_lock(&head->mutex);
3072                 mutex_unlock(&head->mutex);
3073                 btrfs_put_delayed_ref(&head->node);
3074                 return -EAGAIN;
3075         }
3076         spin_unlock(&delayed_refs->lock);
3077
3078         spin_lock(&head->lock);
3079         list_for_each_entry(ref, &head->ref_list, list) {
3080                 /* If it's a shared ref we know a cross reference exists */
3081                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3082                         ret = 1;
3083                         break;
3084                 }
3085
3086                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3087
3088                 /*
3089                  * If our ref doesn't match the one we're currently looking at
3090                  * then we have a cross reference.
3091                  */
3092                 if (data_ref->root != root->root_key.objectid ||
3093                     data_ref->objectid != objectid ||
3094                     data_ref->offset != offset) {
3095                         ret = 1;
3096                         break;
3097                 }
3098         }
3099         spin_unlock(&head->lock);
3100         mutex_unlock(&head->mutex);
3101         return ret;
3102 }
3103
3104 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3105                                         struct btrfs_root *root,
3106                                         struct btrfs_path *path,
3107                                         u64 objectid, u64 offset, u64 bytenr)
3108 {
3109         struct btrfs_root *extent_root = root->fs_info->extent_root;
3110         struct extent_buffer *leaf;
3111         struct btrfs_extent_data_ref *ref;
3112         struct btrfs_extent_inline_ref *iref;
3113         struct btrfs_extent_item *ei;
3114         struct btrfs_key key;
3115         u32 item_size;
3116         int ret;
3117
3118         key.objectid = bytenr;
3119         key.offset = (u64)-1;
3120         key.type = BTRFS_EXTENT_ITEM_KEY;
3121
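        /*
         * With the offset set to (u64)-1 the search lands just past the
         * last item for this bytenr; stepping one slot back below then
         * gives us the extent item itself, if it exists.
         */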
3122         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3123         if (ret < 0)
3124                 goto out;
3125         BUG_ON(ret == 0); /* Corruption */
3126
3127         ret = -ENOENT;
3128         if (path->slots[0] == 0)
3129                 goto out;
3130
3131         path->slots[0]--;
3132         leaf = path->nodes[0];
3133         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3134
3135         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3136                 goto out;
3137
3138         ret = 1;
3139         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3140 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3141         if (item_size < sizeof(*ei)) {
3142                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3143                 goto out;
3144         }
3145 #endif
3146         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3147
3148         if (item_size != sizeof(*ei) +
3149             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3150                 goto out;
3151
3152         if (btrfs_extent_generation(leaf, ei) <=
3153             btrfs_root_last_snapshot(&root->root_item))
3154                 goto out;
3155
3156         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3157         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3158             BTRFS_EXTENT_DATA_REF_KEY)
3159                 goto out;
3160
3161         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3162         if (btrfs_extent_refs(leaf, ei) !=
3163             btrfs_extent_data_ref_count(leaf, ref) ||
3164             btrfs_extent_data_ref_root(leaf, ref) !=
3165             root->root_key.objectid ||
3166             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3167             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3168                 goto out;
3169
3170         ret = 0;
3171 out:
3172         return ret;
3173 }
3174
3175 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3176                           struct btrfs_root *root,
3177                           u64 objectid, u64 offset, u64 bytenr)
3178 {
3179         struct btrfs_path *path;
3180         int ret;
3181         int ret2;
3182
3183         path = btrfs_alloc_path();
3184         if (!path)
3185                 return -ENOMEM;
3186
3187         do {
3188                 ret = check_committed_ref(trans, root, path, objectid,
3189                                           offset, bytenr);
3190                 if (ret && ret != -ENOENT)
3191                         goto out;
3192
3193                 ret2 = check_delayed_ref(trans, root, path, objectid,
3194                                          offset, bytenr);
3195         } while (ret2 == -EAGAIN);
3196
3197         if (ret2 && ret2 != -ENOENT) {
3198                 ret = ret2;
3199                 goto out;
3200         }
3201
3202         if (ret != -ENOENT || ret2 != -ENOENT)
3203                 ret = 0;
3204 out:
3205         btrfs_free_path(path);
3206         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3207                 WARN_ON(ret > 0);
3208         return ret;
3209 }
3210
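/*
 * Walk every item in @buf and add (inc != 0) or drop one reference for each
 * data extent (in a leaf) or child block (in a node) it points to, typically
 * as part of COWing @buf.
 */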
3211 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3212                            struct btrfs_root *root,
3213                            struct extent_buffer *buf,
3214                            int full_backref, int inc)
3215 {
3216         u64 bytenr;
3217         u64 num_bytes;
3218         u64 parent;
3219         u64 ref_root;
3220         u32 nritems;
3221         struct btrfs_key key;
3222         struct btrfs_file_extent_item *fi;
3223         int i;
3224         int level;
3225         int ret = 0;
3226         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3227                             u64, u64, u64, u64, u64, u64);
3228
3230         if (btrfs_is_testing(root->fs_info))
3231                 return 0;
3232
3233         ref_root = btrfs_header_owner(buf);
3234         nritems = btrfs_header_nritems(buf);
3235         level = btrfs_header_level(buf);
3236
3237         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3238                 return 0;
3239
3240         if (inc)
3241                 process_func = btrfs_inc_extent_ref;
3242         else
3243                 process_func = btrfs_free_extent;
3244
3245         if (full_backref)
3246                 parent = buf->start;
3247         else
3248                 parent = 0;
3249
3250         for (i = 0; i < nritems; i++) {
3251                 if (level == 0) {
3252                         btrfs_item_key_to_cpu(buf, &key, i);
3253                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3254                                 continue;
3255                         fi = btrfs_item_ptr(buf, i,
3256                                             struct btrfs_file_extent_item);
3257                         if (btrfs_file_extent_type(buf, fi) ==
3258                             BTRFS_FILE_EXTENT_INLINE)
3259                                 continue;
3260                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3261                         if (bytenr == 0)
3262                                 continue;
3263
3264                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3265                         key.offset -= btrfs_file_extent_offset(buf, fi);
3266                         ret = process_func(trans, root, bytenr, num_bytes,
3267                                            parent, ref_root, key.objectid,
3268                                            key.offset);
3269                         if (ret)
3270                                 goto fail;
3271                 } else {
3272                         bytenr = btrfs_node_blockptr(buf, i);
3273                         num_bytes = root->nodesize;
3274                         ret = process_func(trans, root, bytenr, num_bytes,
3275                                            parent, ref_root, level - 1, 0);
3276                         if (ret)
3277                                 goto fail;
3278                 }
3279         }
3280         return 0;
3281 fail:
3282         return ret;
3283 }
3284
3285 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3286                   struct extent_buffer *buf, int full_backref)
3287 {
3288         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3289 }
3290
3291 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3292                   struct extent_buffer *buf, int full_backref)
3293 {
3294         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3295 }
3296
3297 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3298                                  struct btrfs_root *root,
3299                                  struct btrfs_path *path,
3300                                  struct btrfs_block_group_cache *cache)
3301 {
3302         int ret;
3303         struct btrfs_root *extent_root = root->fs_info->extent_root;
3304         unsigned long bi;
3305         struct extent_buffer *leaf;
3306
3307         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3308         if (ret) {
3309                 if (ret > 0)
3310                         ret = -ENOENT;
3311                 goto fail;
3312         }
3313
3314         leaf = path->nodes[0];
3315         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3316         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3317         btrfs_mark_buffer_dirty(leaf);
3318 fail:
3319         btrfs_release_path(path);
3320         return ret;
3321 }
3323
3324 static struct btrfs_block_group_cache *
3325 next_block_group(struct btrfs_root *root,
3326                  struct btrfs_block_group_cache *cache)
3327 {
3328         struct rb_node *node;
3329
3330         spin_lock(&root->fs_info->block_group_cache_lock);
3331
3332         /* If our block group was removed, we need a full search. */
3333         if (RB_EMPTY_NODE(&cache->cache_node)) {
3334                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3335
3336                 spin_unlock(&root->fs_info->block_group_cache_lock);
3337                 btrfs_put_block_group(cache);
3338                 cache = btrfs_lookup_first_block_group(root->fs_info,
3339                                                        next_bytenr);
3340                 return cache;
3341         }
3342         node = rb_next(&cache->cache_node);
3343         btrfs_put_block_group(cache);
3344         if (node) {
3345                 cache = rb_entry(node, struct btrfs_block_group_cache,
3346                                  cache_node);
3347                 btrfs_get_block_group(cache);
3348         } else
3349                 cache = NULL;
3350         spin_unlock(&root->fs_info->block_group_cache_lock);
3351         return cache;
3352 }
3353
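/*
 * Get the free space cache inode for @block_group ready to be written out
 * later in the transaction: create or truncate the inode as needed and
 * preallocate room for the cache, recording the result in the block
 * group's disk_cache_state.
 */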
3354 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3355                             struct btrfs_trans_handle *trans,
3356                             struct btrfs_path *path)
3357 {
3358         struct btrfs_root *root = block_group->fs_info->tree_root;
3359         struct inode *inode = NULL;
3360         u64 alloc_hint = 0;
3361         int dcs = BTRFS_DC_ERROR;
3362         u64 num_pages = 0;
3363         int retries = 0;
3364         int ret = 0;
3365
3366         /*
3367          * If this block group is smaller than 100 megs don't bother caching the
3368          * block group.
3369          */
3370         if (block_group->key.offset < (100 * SZ_1M)) {
3371                 spin_lock(&block_group->lock);
3372                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3373                 spin_unlock(&block_group->lock);
3374                 return 0;
3375         }
3376
3377         if (trans->aborted)
3378                 return 0;
3379 again:
3380         inode = lookup_free_space_inode(root, block_group, path);
3381         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3382                 ret = PTR_ERR(inode);
3383                 btrfs_release_path(path);
3384                 goto out;
3385         }
3386
3387         if (IS_ERR(inode)) {
3388                 BUG_ON(retries);
3389                 retries++;
3390
3391                 if (block_group->ro)
3392                         goto out_free;
3393
3394                 ret = create_free_space_inode(root, trans, block_group, path);
3395                 if (ret)
3396                         goto out_free;
3397                 goto again;
3398         }
3399
3400         /*
3401          * We want to set the generation to 0, that way if anything goes wrong
3402          * from here on out we know not to trust this cache when we load up next
3403          * time.
3404          */
3405         BTRFS_I(inode)->generation = 0;
3406         ret = btrfs_update_inode(trans, root, inode);
3407         if (ret) {
3408                 /*
3409                  * So theoretically we could recover from this, simply set the
3410                  * super cache generation to 0 so we know to invalidate the
3411                  * cache, but then we'd have to keep track of the block groups
3412                  * that fail this way so we know we _have_ to reset this cache
3413                  * before the next commit or risk reading stale cache.  So to
3414                  * limit our exposure to horrible edge cases lets just abort the
3415                  * transaction, this only happens in really bad situations
3416                  * anyway.
3417                  */
3418                 btrfs_abort_transaction(trans, ret);
3419                 goto out_put;
3420         }
3421         WARN_ON(ret);
3422
3423         /* We've already setup this transaction, go ahead and exit */
3424         if (block_group->cache_generation == trans->transid &&
3425             i_size_read(inode)) {
3426                 dcs = BTRFS_DC_SETUP;
3427                 goto out_put;
3428         }
3429
3430         if (i_size_read(inode) > 0) {
3431                 ret = btrfs_check_trunc_cache_free_space(root,
3432                                         &root->fs_info->global_block_rsv);
3433                 if (ret)
3434                         goto out_put;
3435
3436                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3437                 if (ret)
3438                         goto out_put;
3439         }
3440
3441         spin_lock(&block_group->lock);
3442         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3443             !btrfs_test_opt(root->fs_info, SPACE_CACHE)) {
3444                 /*
3445                  * don't bother trying to write stuff out _if_
3446                  * a) we're not cached,
3447                  * b) we're mounted with the nospace_cache option.
3448                  */
3449                 dcs = BTRFS_DC_WRITTEN;
3450                 spin_unlock(&block_group->lock);
3451                 goto out_put;
3452         }
3453         spin_unlock(&block_group->lock);
3454
3455         /*
3456          * We hit an ENOSPC when setting up the cache in this transaction, just
3457          * skip doing the setup, we've already cleared the cache so we're safe.
3458          */
3459         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3460                 ret = -ENOSPC;
3461                 goto out_put;
3462         }
3463
3464         /*
3465          * Try to preallocate enough space based on how big the block group is.
3466          * Keep in mind this has to include any pinned space which could end up
3467          * taking up quite a bit since it's not folded into the other space
3468          * cache.
3469          */
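        /*
         * This works out to 16 pages per 256MiB of block group.  Note that
         * after the PAGE_SIZE multiplication below, num_pages actually
         * holds a byte count rather than a page count.
         */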
3470         num_pages = div_u64(block_group->key.offset, SZ_256M);
3471         if (!num_pages)
3472                 num_pages = 1;
3473
3474         num_pages *= 16;
3475         num_pages *= PAGE_SIZE;
3476
3477         ret = btrfs_check_data_free_space(inode, 0, num_pages);
3478         if (ret)
3479                 goto out_put;
3480
3481         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3482                                               num_pages, num_pages,
3483                                               &alloc_hint);
3484         /*
3485          * Our cache requires contiguous chunks so that we don't modify a bunch
3486          * of metadata or split extents when writing the cache out, which means
3487          * we can enospc if we are heavily fragmented in addition to just normal
3488          * out of space conditions.  So if we hit this just skip setting up any
3489          * other block groups for this transaction, maybe we'll unpin enough
3490          * space the next time around.
3491          */
3492         if (!ret)
3493                 dcs = BTRFS_DC_SETUP;
3494         else if (ret == -ENOSPC)
3495                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3496
3497 out_put:
3498         iput(inode);
3499 out_free:
3500         btrfs_release_path(path);
3501 out:
3502         spin_lock(&block_group->lock);
3503         if (!ret && dcs == BTRFS_DC_SETUP)
3504                 block_group->cache_generation = trans->transid;
3505         block_group->disk_cache_state = dcs;
3506         spin_unlock(&block_group->lock);
3507
3508         return ret;
3509 }
3510
3511 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3512                             struct btrfs_root *root)
3513 {
3514         struct btrfs_block_group_cache *cache, *tmp;
3515         struct btrfs_transaction *cur_trans = trans->transaction;
3516         struct btrfs_path *path;
3517
3518         if (list_empty(&cur_trans->dirty_bgs) ||
3519             !btrfs_test_opt(root->fs_info, SPACE_CACHE))
3520                 return 0;
3521
3522         path = btrfs_alloc_path();
3523         if (!path)
3524                 return -ENOMEM;
3525
3526         /* Could add new block groups, use _safe just in case */
3527         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3528                                  dirty_list) {
3529                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3530                         cache_save_setup(cache, trans, path);
3531         }
3532
3533         btrfs_free_path(path);
3534         return 0;
3535 }
3536
3537 /*
3538  * transaction commit does final block group cache writeback during a
3539  * critical section where nothing is allowed to change the FS.  This is
3540  * required in order for the cache to actually match the block group,
3541  * but can introduce a lot of latency into the commit.
3542  *
3543  * So, btrfs_start_dirty_block_groups is here to kick off block group
3544  * cache IO.  There's a chance we'll have to redo some of it if the
3545  * block group changes again during the commit, but it greatly reduces
3546  * the commit latency by getting rid of the easy block groups while
3547  * we're still allowing others to join the commit.
3548  */
3549 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3550                                    struct btrfs_root *root)
3551 {
3552         struct btrfs_block_group_cache *cache;
3553         struct btrfs_transaction *cur_trans = trans->transaction;
3554         int ret = 0;
3555         int should_put;
3556         struct btrfs_path *path = NULL;
3557         LIST_HEAD(dirty);
3558         struct list_head *io = &cur_trans->io_bgs;
3559         int num_started = 0;
3560         int loops = 0;
3561
3562         spin_lock(&cur_trans->dirty_bgs_lock);
3563         if (list_empty(&cur_trans->dirty_bgs)) {
3564                 spin_unlock(&cur_trans->dirty_bgs_lock);
3565                 return 0;
3566         }
3567         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3568         spin_unlock(&cur_trans->dirty_bgs_lock);
3569
3570 again:
3571         /*
3572          * make sure all the block groups on our dirty list actually
3573          * exist
3574          */
3575         btrfs_create_pending_block_groups(trans, root);
3576
3577         if (!path) {
3578                 path = btrfs_alloc_path();
3579                 if (!path)
3580                         return -ENOMEM;
3581         }
3582
3583         /*
3584          * cache_write_mutex is here only to save us from balance or automatic
3585          * removal of empty block groups deleting this block group while we are
3586          * writing out the cache
3587          */
3588         mutex_lock(&trans->transaction->cache_write_mutex);
3589         while (!list_empty(&dirty)) {
3590                 cache = list_first_entry(&dirty,
3591                                          struct btrfs_block_group_cache,
3592                                          dirty_list);
3593                 /*
3594                  * this can happen if something re-dirties a block
3595                  * group that is already under IO.  Just wait for it to
3596                  * finish and then do it all again
3597                  */
3598                 if (!list_empty(&cache->io_list)) {
3599                         list_del_init(&cache->io_list);
3600                         btrfs_wait_cache_io(root, trans, cache,
3601                                             &cache->io_ctl, path,
3602                                             cache->key.objectid);
3603                         btrfs_put_block_group(cache);
3604                 }
3605
3607                 /*
3608                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3609                  * if it should update the cache_state.  Don't delete
3610                  * until after we wait.
3611                  *
3612                  * Since we're not running in the commit critical section
3613                  * we need the dirty_bgs_lock to protect from update_block_group
3614                  */
3615                 spin_lock(&cur_trans->dirty_bgs_lock);
3616                 list_del_init(&cache->dirty_list);
3617                 spin_unlock(&cur_trans->dirty_bgs_lock);
3618
3619                 should_put = 1;
3620
3621                 cache_save_setup(cache, trans, path);
3622
3623                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3624                         cache->io_ctl.inode = NULL;
3625                         ret = btrfs_write_out_cache(root, trans, cache, path);
3626                         if (ret == 0 && cache->io_ctl.inode) {
3627                                 num_started++;
3628                                 should_put = 0;
3629
3630                                 /*
3631                                  * the cache_write_mutex is protecting
3632                                  * the io_list
3633                                  */
3634                                 list_add_tail(&cache->io_list, io);
3635                         } else {
3636                                 /*
3637                                  * if we failed to write the cache, the
3638                                  * generation will be bad and life goes on
3639                                  */
3640                                 ret = 0;
3641                         }
3642                 }
3643                 if (!ret) {
3644                         ret = write_one_cache_group(trans, root, path, cache);
3645                         /*
3646                          * Our block group might still be attached to the list
3647                          * of new block groups in the transaction handle of some
3648                          * other task (struct btrfs_trans_handle->new_bgs). This
3649                          * means its block group item isn't yet in the extent
3650                          * tree. If this happens ignore the error, as we will
3651                          * try again later in the critical section of the
3652                          * transaction commit.
3653                          */
3654                         if (ret == -ENOENT) {
3655                                 ret = 0;
3656                                 spin_lock(&cur_trans->dirty_bgs_lock);
3657                                 if (list_empty(&cache->dirty_list)) {
3658                                         list_add_tail(&cache->dirty_list,
3659                                                       &cur_trans->dirty_bgs);
3660                                         btrfs_get_block_group(cache);
3661                                 }
3662                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3663                         } else if (ret) {
3664                                 btrfs_abort_transaction(trans, ret);
3665                         }
3666                 }
3667
3668                 /* if it's not on the io list, we need to put the block group */
3669                 if (should_put)
3670                         btrfs_put_block_group(cache);
3671
3672                 if (ret)
3673                         break;
3674
3675                 /*
3676                  * Avoid blocking other tasks for too long. It might even save
3677                  * us from writing caches for block groups that are going to be
3678                  * removed.
3679                  */
3680                 mutex_unlock(&trans->transaction->cache_write_mutex);
3681                 mutex_lock(&trans->transaction->cache_write_mutex);
3682         }
3683         mutex_unlock(&trans->transaction->cache_write_mutex);
3684
3685         /*
3686          * go through delayed refs for all the stuff we've just kicked off
3687          * and then loop back (just once)
3688          */
3689         ret = btrfs_run_delayed_refs(trans, root, 0);
3690         if (!ret && loops == 0) {
3691                 loops++;
3692                 spin_lock(&cur_trans->dirty_bgs_lock);
3693                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3694                 /*
3695                  * dirty_bgs_lock protects us from concurrent block group
3696                  * deletes too (not just cache_write_mutex).
3697                  */
3698                 if (!list_empty(&dirty)) {
3699                         spin_unlock(&cur_trans->dirty_bgs_lock);
3700                         goto again;
3701                 }
3702                 spin_unlock(&cur_trans->dirty_bgs_lock);
3703         } else if (ret < 0) {
3704                 btrfs_cleanup_dirty_bgs(cur_trans, root);
3705         }
3706
3707         btrfs_free_path(path);
3708         return ret;
3709 }
3710
3711 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3712                                    struct btrfs_root *root)
3713 {
3714         struct btrfs_block_group_cache *cache;
3715         struct btrfs_transaction *cur_trans = trans->transaction;
3716         int ret = 0;
3717         int should_put;
3718         struct btrfs_path *path;
3719         struct list_head *io = &cur_trans->io_bgs;
3720         int num_started = 0;
3721
3722         path = btrfs_alloc_path();
3723         if (!path)
3724                 return -ENOMEM;
3725
3726         /*
3727          * Even though we are in the critical section of the transaction commit,
3728          * we can still have concurrent tasks adding elements to this
3729          * transaction's list of dirty block groups. These tasks correspond to
3730          * endio free space workers started when writeback finishes for a
3731          * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3732          * allocate new block groups as a result of COWing nodes of the root
3733          * tree when updating the free space inode. The writeback for the space
3734          * caches is triggered by an earlier call to
3735          * btrfs_start_dirty_block_groups() and iterations of the following
3736          * loop.
3737          * Also we want to do the cache_save_setup first and then run the
3738          * delayed refs to make sure we have the best chance at doing this all
3739          * in one shot.
3740          */
3741         spin_lock(&cur_trans->dirty_bgs_lock);
3742         while (!list_empty(&cur_trans->dirty_bgs)) {
3743                 cache = list_first_entry(&cur_trans->dirty_bgs,
3744                                          struct btrfs_block_group_cache,
3745                                          dirty_list);
3746
3747                 /*
3748                  * this can happen if cache_save_setup re-dirties a block
3749                  * group that is already under IO.  Just wait for it to
3750                  * finish and then do it all again
3751                  */
3752                 if (!list_empty(&cache->io_list)) {
3753                         spin_unlock(&cur_trans->dirty_bgs_lock);
3754                         list_del_init(&cache->io_list);
3755                         btrfs_wait_cache_io(root, trans, cache,
3756                                             &cache->io_ctl, path,
3757                                             cache->key.objectid);
3758                         btrfs_put_block_group(cache);
3759                         spin_lock(&cur_trans->dirty_bgs_lock);
3760                 }
3761
3762                 /*
3763                  * don't remove from the dirty list until after we've waited
3764                  * on any pending IO
3765                  */
3766                 list_del_init(&cache->dirty_list);
3767                 spin_unlock(&cur_trans->dirty_bgs_lock);
3768                 should_put = 1;
3769
3770                 cache_save_setup(cache, trans, path);
3771
3772                 if (!ret)
3773                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3774
3775                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3776                         cache->io_ctl.inode = NULL;
3777                         ret = btrfs_write_out_cache(root, trans, cache, path);
3778                         if (ret == 0 && cache->io_ctl.inode) {
3779                                 num_started++;
3780                                 should_put = 0;
3781                                 list_add_tail(&cache->io_list, io);
3782                         } else {
3783                                 /*
3784                                  * if we failed to write the cache, the
3785                                  * generation will be bad and life goes on
3786                                  */
3787                                 ret = 0;
3788                         }
3789                 }
3790                 if (!ret) {
3791                         ret = write_one_cache_group(trans, root, path, cache);
3792                         /*
3793                          * One of the free space endio workers might have
3794                          * created a new block group while updating a free space
3795                          * cache's inode (at inode.c:btrfs_finish_ordered_io())
3796                          * and hasn't released its transaction handle yet, in
3797                          * which case the new block group is still attached to
3798                          * its transaction handle and its creation has not
3799                          * finished yet (no block group item in the extent tree
3800                          * yet, etc). If this is the case, wait for all free
3801                          * space endio workers to finish and retry. This is a
3802                          * very rare case, so no need for a more efficient and
3803                          * complex approach.
3804                          */
3805                         if (ret == -ENOENT) {
3806                                 wait_event(cur_trans->writer_wait,
3807                                    atomic_read(&cur_trans->num_writers) == 1);
3808                                 ret = write_one_cache_group(trans, root, path,
3809                                                             cache);
3810                         }
3811                         if (ret)
3812                                 btrfs_abort_transaction(trans, ret);
3813                 }
3814
3815                 /* if it's not on the io list, we need to put the block group */
3816                 if (should_put)
3817                         btrfs_put_block_group(cache);
3818                 spin_lock(&cur_trans->dirty_bgs_lock);
3819         }
3820         spin_unlock(&cur_trans->dirty_bgs_lock);
3821
3822         while (!list_empty(io)) {
3823                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3824                                          io_list);
3825                 list_del_init(&cache->io_list);
3826                 btrfs_wait_cache_io(root, trans, cache,
3827                                     &cache->io_ctl, path, cache->key.objectid);
3828                 btrfs_put_block_group(cache);
3829         }
3830
3831         btrfs_free_path(path);
3832         return ret;
3833 }
3834
3835 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3836 {
3837         struct btrfs_block_group_cache *block_group;
3838         int readonly = 0;
3839
3840         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3841         if (!block_group || block_group->ro)
3842                 readonly = 1;
3843         if (block_group)
3844                 btrfs_put_block_group(block_group);
3845         return readonly;
3846 }
3847
3848 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3849 {
3850         struct btrfs_block_group_cache *bg;
3851         bool ret = true;
3852
3853         bg = btrfs_lookup_block_group(fs_info, bytenr);
3854         if (!bg)
3855                 return false;
3856
3857         spin_lock(&bg->lock);
3858         if (bg->ro)
3859                 ret = false;
3860         else
3861                 atomic_inc(&bg->nocow_writers);
3862         spin_unlock(&bg->lock);
3863
3864         /* no put on block group, done by btrfs_dec_nocow_writers */
3865         if (!ret)
3866                 btrfs_put_block_group(bg);
3867
3868         return ret;
3869 }
3871
3872 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3873 {
3874         struct btrfs_block_group_cache *bg;
3875
3876         bg = btrfs_lookup_block_group(fs_info, bytenr);
3877         ASSERT(bg);
3878         if (atomic_dec_and_test(&bg->nocow_writers))
3879                 wake_up_atomic_t(&bg->nocow_writers);
3880         /*
3881          * Once for our lookup and once for the lookup done by a previous call
3882          * to btrfs_inc_nocow_writers()
3883          */
3884         btrfs_put_block_group(bg);
3885         btrfs_put_block_group(bg);
3886 }
3887
3888 static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a)
3889 {
3890         schedule();
3891         return 0;
3892 }
3893
3894 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
3895 {
3896         wait_on_atomic_t(&bg->nocow_writers,
3897                          btrfs_wait_nocow_writers_atomic_t,
3898                          TASK_UNINTERRUPTIBLE);
3899 }
3900
3901 static const char *alloc_name(u64 flags)
3902 {
3903         switch (flags) {
3904         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3905                 return "mixed";
3906         case BTRFS_BLOCK_GROUP_METADATA:
3907                 return "metadata";
3908         case BTRFS_BLOCK_GROUP_DATA:
3909                 return "data";
3910         case BTRFS_BLOCK_GROUP_SYSTEM:
3911                 return "system";
3912         default:
3913                 WARN_ON(1);
3914                 return "invalid-combination";
3915         }
3916 }
3917
3918 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3919                              u64 total_bytes, u64 bytes_used,
3920                              u64 bytes_readonly,
3921                              struct btrfs_space_info **space_info)
3922 {
3923         struct btrfs_space_info *found;
3924         int i;
3925         int factor;
3926         int ret;
3927
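        /*
         * "factor" converts logical bytes into raw disk usage: DUP, RAID1
         * and RAID10 keep two copies of every byte, so disk_total and
         * disk_used below are doubled for those profiles.
         */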
3928         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3929                      BTRFS_BLOCK_GROUP_RAID10))
3930                 factor = 2;
3931         else
3932                 factor = 1;
3933
3934         found = __find_space_info(info, flags);
3935         if (found) {
3936                 spin_lock(&found->lock);
3937                 found->total_bytes += total_bytes;
3938                 found->disk_total += total_bytes * factor;
3939                 found->bytes_used += bytes_used;
3940                 found->disk_used += bytes_used * factor;
3941                 found->bytes_readonly += bytes_readonly;
3942                 if (total_bytes > 0)
3943                         found->full = 0;
3944                 space_info_add_new_bytes(info, found, total_bytes -
3945                                          bytes_used - bytes_readonly);
3946                 spin_unlock(&found->lock);
3947                 *space_info = found;
3948                 return 0;
3949         }
3950         found = kzalloc(sizeof(*found), GFP_NOFS);
3951         if (!found)
3952                 return -ENOMEM;
3953
3954         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3955         if (ret) {
3956                 kfree(found);
3957                 return ret;
3958         }
3959
3960         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3961                 INIT_LIST_HEAD(&found->block_groups[i]);
3962         init_rwsem(&found->groups_sem);
3963         spin_lock_init(&found->lock);
3964         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3965         found->total_bytes = total_bytes;
3966         found->disk_total = total_bytes * factor;
3967         found->bytes_used = bytes_used;
3968         found->disk_used = bytes_used * factor;
3969         found->bytes_pinned = 0;
3970         found->bytes_reserved = 0;
3971         found->bytes_readonly = bytes_readonly;
3972         found->bytes_may_use = 0;
3973         found->full = 0;
3974         found->max_extent_size = 0;
3975         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3976         found->chunk_alloc = 0;
3977         found->flush = 0;
3978         init_waitqueue_head(&found->wait);
3979         INIT_LIST_HEAD(&found->ro_bgs);
3980         INIT_LIST_HEAD(&found->tickets);
3981         INIT_LIST_HEAD(&found->priority_tickets);
3982
3983         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3984                                     info->space_info_kobj, "%s",
3985                                     alloc_name(found->flags));
3986         if (ret) {
3987                 percpu_counter_destroy(&found->total_bytes_pinned);
3988                 kfree(found);
3989                 return ret;
3990         }
3991
3992         *space_info = found;
3993         list_add_rcu(&found->list, &info->space_info);
3994         if (flags & BTRFS_BLOCK_GROUP_DATA)
3995                 info->data_sinfo = found;
3996
3997         return ret;
3998 }
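
/*
 * A worked example of the factor accounting above, assuming a hypothetical
 * RAID1 metadata space_info (factor == 2):
 *
 *	update_space_info(info, BTRFS_BLOCK_GROUP_METADATA |
 *			  BTRFS_BLOCK_GROUP_RAID1, SZ_1G, SZ_256M, 0, &sinfo);
 *
 * ends with sinfo->total_bytes == 1GiB and sinfo->bytes_used == 256MiB of
 * logical space, but sinfo->disk_total == 2GiB and sinfo->disk_used ==
 * 512MiB, since every logical byte occupies two bytes on disk.  Single,
 * RAID0 and RAID5/6 profiles use factor == 1 here.
 */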
3999
4000 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
4001 {
4002         u64 extra_flags = chunk_to_extended(flags) &
4003                                 BTRFS_EXTENDED_PROFILE_MASK;
4004
4005         write_seqlock(&fs_info->profiles_lock);
4006         if (flags & BTRFS_BLOCK_GROUP_DATA)
4007                 fs_info->avail_data_alloc_bits |= extra_flags;
4008         if (flags & BTRFS_BLOCK_GROUP_METADATA)
4009                 fs_info->avail_metadata_alloc_bits |= extra_flags;
4010         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4011                 fs_info->avail_system_alloc_bits |= extra_flags;
4012         write_sequnlock(&fs_info->profiles_lock);
4013 }
4014
4015 /*
4016  * returns target flags in extended format or 0 if restripe for this
4017  * chunk_type is not in progress
4018  *
4019  * should be called with either volume_mutex or balance_lock held
4020  */
4021 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
4022 {
4023         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4024         u64 target = 0;
4025
4026         if (!bctl)
4027                 return 0;
4028
4029         if (flags & BTRFS_BLOCK_GROUP_DATA &&
4030             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4031                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
4032         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
4033                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4034                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
4035         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
4036                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4037                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
4038         }
4039
4040         return target;
4041 }
4042
4043 /*
4044  * @flags: available profiles in extended format (see ctree.h)
4045  *
4046  * Returns reduced profile in chunk format.  If profile changing is in
4047  * progress (either running or paused) picks the target profile (if it's
4048  * already available), otherwise falls back to plain reducing.
4049  */
4050 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
4051 {
4052         u64 num_devices = root->fs_info->fs_devices->rw_devices;
4053         u64 target;
4054         u64 raid_type;
4055         u64 allowed = 0;
4056
4057         /*
4058          * see if restripe for this chunk_type is in progress, if so
4059          * try to reduce to the target profile
4060          */
4061         spin_lock(&root->fs_info->balance_lock);
4062         target = get_restripe_target(root->fs_info, flags);
4063         if (target) {
4064                 /* pick target profile only if it's already available */
4065                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
4066                         spin_unlock(&root->fs_info->balance_lock);
4067                         return extended_to_chunk(target);
4068                 }
4069         }
4070         spin_unlock(&root->fs_info->balance_lock);
4071
4072         /* First, mask out the RAID levels which aren't possible */
4073         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4074                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
4075                         allowed |= btrfs_raid_group[raid_type];
4076         }
4077         allowed &= flags;
4078
4079         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
4080                 allowed = BTRFS_BLOCK_GROUP_RAID6;
4081         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
4082                 allowed = BTRFS_BLOCK_GROUP_RAID5;
4083         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
4084                 allowed = BTRFS_BLOCK_GROUP_RAID10;
4085         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
4086                 allowed = BTRFS_BLOCK_GROUP_RAID1;
4087         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
4088                 allowed = BTRFS_BLOCK_GROUP_RAID0;
4089
4090         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
4091
4092         return extended_to_chunk(flags | allowed);
4093 }
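
/*
 * A sketch of the reduction above, assuming a filesystem with two rw devices
 * and extended data flags carrying more than one profile:
 *
 *	flags = BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 |
 *		BTRFS_BLOCK_GROUP_RAID0;
 *
 * Both profiles are possible with two devices, so both survive the devs_min
 * mask; the if/else ladder then keeps only the most redundant candidate, and
 * the function returns BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1,
 * converted back to chunk format by extended_to_chunk().
 */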
4094
4095 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
4096 {
4097         unsigned seq;
4098         u64 flags;
4099
4100         do {
4101                 flags = orig_flags;
4102                 seq = read_seqbegin(&root->fs_info->profiles_lock);
4103
4104                 if (flags & BTRFS_BLOCK_GROUP_DATA)
4105                         flags |= root->fs_info->avail_data_alloc_bits;
4106                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4107                         flags |= root->fs_info->avail_system_alloc_bits;
4108                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
4109                         flags |= root->fs_info->avail_metadata_alloc_bits;
4110         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
4111
4112         return btrfs_reduce_alloc_profile(root, flags);
4113 }
4114
4115 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
4116 {
4117         u64 flags;
4118         u64 ret;
4119
4120         if (data)
4121                 flags = BTRFS_BLOCK_GROUP_DATA;
4122         else if (root == root->fs_info->chunk_root)
4123                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
4124         else
4125                 flags = BTRFS_BLOCK_GROUP_METADATA;
4126
4127         ret = get_alloc_profile(root, flags);
4128         return ret;
4129 }
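
/*
 * Typical usage, mirroring the callers later in this file: pass data == 1
 * when sizing a data chunk, 0 otherwise (the SYSTEM profile is picked
 * automatically when @root is the chunk root):
 *
 *	u64 alloc_target = btrfs_get_alloc_profile(root, 1);
 *
 *	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
 *			     alloc_target, CHUNK_ALLOC_NO_FORCE);
 */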
4130
4131 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
4132 {
4133         struct btrfs_space_info *data_sinfo;
4134         struct btrfs_root *root = BTRFS_I(inode)->root;
4135         struct btrfs_fs_info *fs_info = root->fs_info;
4136         u64 used;
4137         int ret = 0;
4138         int need_commit = 2;
4139         int have_pinned_space;
4140
4141         /* make sure bytes are sectorsize aligned */
4142         bytes = ALIGN(bytes, root->sectorsize);
4143
4144         if (btrfs_is_free_space_inode(inode)) {
4145                 need_commit = 0;
4146                 ASSERT(current->journal_info);
4147         }
4148
4149         data_sinfo = fs_info->data_sinfo;
4150         if (!data_sinfo)
4151                 goto alloc;
4152
4153 again:
4154         /* make sure we have enough space to handle the data first */
4155         spin_lock(&data_sinfo->lock);
4156         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4157                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4158                 data_sinfo->bytes_may_use;
4159
4160         if (used + bytes > data_sinfo->total_bytes) {
4161                 struct btrfs_trans_handle *trans;
4162
4163                 /*
4164                  * if we don't have enough free bytes in this space then we need
4165                  * to alloc a new chunk.
4166                  */
4167                 if (!data_sinfo->full) {
4168                         u64 alloc_target;
4169
4170                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4171                         spin_unlock(&data_sinfo->lock);
4172 alloc:
4173                         alloc_target = btrfs_get_alloc_profile(root, 1);
4174                         /*
4175                          * It is ugly that we don't call a nolock join
4176                          * transaction for the free space inode case here.
4177                          * But it is safe because we only do the data space
4178                          * reservation for the free space cache in the
4179                          * transaction context; the common join transaction
4180                          * just increases the use count of the current
4181                          * transaction handle and doesn't try to acquire the
4182                          * trans_lock of the fs.
4183                          */
4184                         trans = btrfs_join_transaction(root);
4185                         if (IS_ERR(trans))
4186                                 return PTR_ERR(trans);
4187
4188                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4189                                              alloc_target,
4190                                              CHUNK_ALLOC_NO_FORCE);
4191                         btrfs_end_transaction(trans, root);
4192                         if (ret < 0) {
4193                                 if (ret != -ENOSPC)
4194                                         return ret;
4195                                 else {
4196                                         have_pinned_space = 1;
4197                                         goto commit_trans;
4198                                 }
4199                         }
4200
4201                         if (!data_sinfo)
4202                                 data_sinfo = fs_info->data_sinfo;
4203
4204                         goto again;
4205                 }
4206
4207                 /*
4208                  * If we don't have enough pinned space to deal with this
4209                  * allocation, and no chunk has been removed in the current
4210                  * transaction, don't bother committing the transaction.
4211                  */
4212                 have_pinned_space = percpu_counter_compare(
4213                         &data_sinfo->total_bytes_pinned,
4214                         used + bytes - data_sinfo->total_bytes);
4215                 spin_unlock(&data_sinfo->lock);
4216
4217                 /* commit the current transaction and try again */
4218 commit_trans:
4219                 if (need_commit &&
4220                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4221                         need_commit--;
4222
4223                         if (need_commit > 0) {
4224                                 btrfs_start_delalloc_roots(fs_info, 0, -1);
4225                                 btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
4226                         }
4227
4228                         trans = btrfs_join_transaction(root);
4229                         if (IS_ERR(trans))
4230                                 return PTR_ERR(trans);
4231                         if (have_pinned_space >= 0 ||
4232                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4233                                      &trans->transaction->flags) ||
4234                             need_commit > 0) {
4235                                 ret = btrfs_commit_transaction(trans, root);
4236                                 if (ret)
4237                                         return ret;
4238                                 /*
4239                                  * The cleaner kthread might still be doing iput
4240                                  * operations. Wait for it to finish so that
4241                                  * more space is released.
4242                                  */
4243                                 mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
4244                                 mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
4245                                 goto again;
4246                         } else {
4247                                 btrfs_end_transaction(trans, root);
4248                         }
4249                 }
4250
4251                 trace_btrfs_space_reservation(root->fs_info,
4252                                               "space_info:enospc",
4253                                               data_sinfo->flags, bytes, 1);
4254                 return -ENOSPC;
4255         }
4256         data_sinfo->bytes_may_use += bytes;
4257         trace_btrfs_space_reservation(root->fs_info, "space_info",
4258                                       data_sinfo->flags, bytes, 1);
4259         spin_unlock(&data_sinfo->lock);
4260
4261         return ret;
4262 }
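
/*
 * The need_commit counter above implements a small retry ladder for the
 * ENOSPC path: on the first pass (need_commit: 2 -> 1) delalloc is flushed
 * and ordered extents are waited on before the commit; on the second
 * (1 -> 0) the transaction is only committed if pinned space or freed
 * block groups suggest it will help, and after that -ENOSPC is returned.
 * The free space inode starts with need_commit == 0 and never commits,
 * since it is already running inside a transaction.
 */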
4263
4264 /*
4265  * New check_data_free_space() with the ability for precise data reservation.
4266  * It will replace the old btrfs_check_data_free_space(); for easier patch
4267  * splitting, the new function is added first and then swapped in.
4268  */
4269 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4270 {
4271         struct btrfs_root *root = BTRFS_I(inode)->root;
4272         int ret;
4273
4274         /* align the range */
4275         len = round_up(start + len, root->sectorsize) -
4276               round_down(start, root->sectorsize);
4277         start = round_down(start, root->sectorsize);
4278
4279         ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4280         if (ret < 0)
4281                 return ret;
4282
4283         /* Use new btrfs_qgroup_reserve_data to reserve precise data space. */
4284         ret = btrfs_qgroup_reserve_data(inode, start, len);
4285         if (ret)
4286                 btrfs_free_reserved_data_space_noquota(inode, start, len);
4287         return ret;
4288 }
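
/*
 * A minimal usage sketch, assuming a hypothetical write path that reserves
 * a byte range up front and releases it again on failure:
 *
 *	ret = btrfs_check_data_free_space(inode, pos, count);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(inode, pos, count);
 *	if (ret)
 *		btrfs_free_reserved_data_space(inode, pos, count);
 *
 * where do_the_write() stands in for the actual write-out.  The reserve and
 * the release must describe the same range; both round it out to sectorsize
 * before touching bytes_may_use.
 */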
4289
4290 /*
4291  * Called if we need to clear a data reservation for this inode
4292  * Normally in an error case.
4293  *
4294  * This one will *NOT* use the accurate qgroup reserved space API; it is only
4295  * for cases where we can't sleep and are sure it won't affect the qgroup
4296  * reserved space, like clear_bit_hook().
4297  */
4298 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4299                                             u64 len)
4300 {
4301         struct btrfs_root *root = BTRFS_I(inode)->root;
4302         struct btrfs_space_info *data_sinfo;
4303
4304         /* Make sure the range is aligned to sectorsize */
4305         len = round_up(start + len, root->sectorsize) -
4306               round_down(start, root->sectorsize);
4307         start = round_down(start, root->sectorsize);
4308
4309         data_sinfo = root->fs_info->data_sinfo;
4310         spin_lock(&data_sinfo->lock);
4311         if (WARN_ON(data_sinfo->bytes_may_use < len))
4312                 data_sinfo->bytes_may_use = 0;
4313         else
4314                 data_sinfo->bytes_may_use -= len;
4315         trace_btrfs_space_reservation(root->fs_info, "space_info",
4316                                       data_sinfo->flags, len, 0);
4317         spin_unlock(&data_sinfo->lock);
4318 }
4319
4320 /*
4321  * Called if we need to clear a data reservation for this inode
4322  * Normally in an error case.
4323  *
4324  * This one will handle the per-inode data rsv map for accurate reserved
4325  * space framework.
4326  */
4327 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4328 {
4329         btrfs_free_reserved_data_space_noquota(inode, start, len);
4330         btrfs_qgroup_free_data(inode, start, len);
4331 }
4332
4333 static void force_metadata_allocation(struct btrfs_fs_info *info)
4334 {
4335         struct list_head *head = &info->space_info;
4336         struct btrfs_space_info *found;
4337
4338         rcu_read_lock();
4339         list_for_each_entry_rcu(found, head, list) {
4340                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4341                         found->force_alloc = CHUNK_ALLOC_FORCE;
4342         }
4343         rcu_read_unlock();
4344 }
4345
4346 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4347 {
4348         return (global->size << 1);
4349 }
4350
4351 static int should_alloc_chunk(struct btrfs_root *root,
4352                               struct btrfs_space_info *sinfo, int force)
4353 {
4354         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4355         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4356         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4357         u64 thresh;
4358
4359         if (force == CHUNK_ALLOC_FORCE)
4360                 return 1;
4361
4362         /*
4363          * We need to take into account the global rsv because for all intents
4364          * and purposes it's used space.  Don't worry about locking the
4365          * global_rsv, it doesn't change except when the transaction commits.
4366          */
4367         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4368                 num_allocated += calc_global_rsv_need_space(global_rsv);
4369
4370         /*
4371          * in limited mode, we want to have some free space up to
4372          * about 1% of the FS size.
4373          */
4374         if (force == CHUNK_ALLOC_LIMITED) {
4375                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4376                 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4377
4378                 if (num_bytes - num_allocated < thresh)
4379                         return 1;
4380         }
4381
4382         if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
4383                 return 0;
4384         return 1;
4385 }
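
/*
 * Worked numbers for the CHUNK_ALLOC_LIMITED threshold above, assuming a
 * hypothetical 1TiB filesystem: div_factor_fine(thresh, 1) is 1% of 1TiB,
 * i.e. ~10GiB, which max_t() keeps since it exceeds SZ_64M; a chunk is then
 * allocated once less than that much of the space_info remains free.  Below
 * roughly 6.25GiB of total size the SZ_64M floor wins instead.
 */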
4386
4387 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4388 {
4389         u64 num_dev;
4390
4391         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4392                     BTRFS_BLOCK_GROUP_RAID0 |
4393                     BTRFS_BLOCK_GROUP_RAID5 |
4394                     BTRFS_BLOCK_GROUP_RAID6))
4395                 num_dev = root->fs_info->fs_devices->rw_devices;
4396         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4397                 num_dev = 2;
4398         else
4399                 num_dev = 1;    /* DUP or single */
4400
4401         return num_dev;
4402 }
4403
4404 /*
4405  * Reserve space in the system space info necessary for adding or removing a
4406  * chunk of the given @type: num_devs device items to update plus one chunk
4407  * item to insert or delete.
4408  */
4409 void check_system_chunk(struct btrfs_trans_handle *trans,
4410                         struct btrfs_root *root,
4411                         u64 type)
4412 {
4413         struct btrfs_space_info *info;
4414         u64 left;
4415         u64 thresh;
4416         int ret = 0;
4417         u64 num_devs;
4418
4419         /*
4420          * Needed because we can end up allocating a system chunk and need an
4421          * atomic, race-free space reservation in the chunk block reserve.
4422          */
4423         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4424
4425         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4426         spin_lock(&info->lock);
4427         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4428                 info->bytes_reserved - info->bytes_readonly -
4429                 info->bytes_may_use;
4430         spin_unlock(&info->lock);
4431
4432         num_devs = get_profile_num_devs(root, type);
4433
4434         /* num_devs device items to update and 1 chunk item to add or remove */
4435         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4436                 btrfs_calc_trans_metadata_size(root, 1);
4437
4438         if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
4439                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4440                         left, thresh, type);
4441                 dump_space_info(root->fs_info, info, 0, 0);
4442         }
4443
4444         if (left < thresh) {
4445                 u64 flags;
4446
4447                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4448                 /*
4449                  * Ignore failure to create system chunk. We might end up not
4450                  * needing it, as we might not need to COW all nodes/leafs from
4451                  * the paths we visit in the chunk tree (they were already COWed
4452                  * or created in the current transaction for example).
4453                  */
4454                 ret = btrfs_alloc_chunk(trans, root, flags);
4455         }
4456
4457         if (!ret) {
4458                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4459                                           &root->fs_info->chunk_block_rsv,
4460                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4461                 if (!ret)
4462                         trans->chunk_bytes_reserved += thresh;
4463         }
4464 }
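
/*
 * Sizing sketch for the threshold above: on a hypothetical four-device
 * RAID10 filesystem, get_profile_num_devs() returns 4, so room for four
 * device item updates plus one chunk item insertion or removal is reserved;
 * if the SYSTEM space_info cannot cover that, a new system chunk is
 * allocated first, and only then is the chunk block reserve topped up via
 * btrfs_block_rsv_add().
 */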
4465
4466 /*
4467  * If force is CHUNK_ALLOC_FORCE:
4468  *    - return 1 if it successfully allocates a chunk,
4469  *    - return errors including -ENOSPC otherwise.
4470  * If force is NOT CHUNK_ALLOC_FORCE:
4471  *    - return 0 if it doesn't need to allocate a new chunk,
4472  *    - return 1 if it successfully allocates a chunk,
4473  *    - return errors including -ENOSPC otherwise.
4474  */
4475 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4476                           struct btrfs_root *extent_root, u64 flags, int force)
4477 {
4478         struct btrfs_space_info *space_info;
4479         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4480         int wait_for_alloc = 0;
4481         int ret = 0;
4482
4483         /* Don't re-enter if we're already allocating a chunk */
4484         if (trans->allocating_chunk)
4485                 return -ENOSPC;
4486
4487         space_info = __find_space_info(extent_root->fs_info, flags);
4488         if (!space_info) {
4489                 ret = update_space_info(extent_root->fs_info, flags,
4490                                         0, 0, 0, &space_info);
4491                 BUG_ON(ret); /* -ENOMEM */
4492         }
4493         BUG_ON(!space_info); /* Logic error */
4494
4495 again:
4496         spin_lock(&space_info->lock);
4497         if (force < space_info->force_alloc)
4498                 force = space_info->force_alloc;
4499         if (space_info->full) {
4500                 if (should_alloc_chunk(extent_root, space_info, force))
4501                         ret = -ENOSPC;
4502                 else
4503                         ret = 0;
4504                 spin_unlock(&space_info->lock);
4505                 return ret;
4506         }
4507
4508         if (!should_alloc_chunk(extent_root, space_info, force)) {
4509                 spin_unlock(&space_info->lock);
4510                 return 0;
4511         } else if (space_info->chunk_alloc) {
4512                 wait_for_alloc = 1;
4513         } else {
4514                 space_info->chunk_alloc = 1;
4515         }
4516
4517         spin_unlock(&space_info->lock);
4518
4519         mutex_lock(&fs_info->chunk_mutex);
4520
4521         /*
4522          * The chunk_mutex is held throughout the entirety of a chunk
4523          * allocation, so once we've acquired the chunk_mutex we know that the
4524          * other guy is done and we need to recheck and see if we should
4525          * allocate.
4526          */
4527         if (wait_for_alloc) {
4528                 mutex_unlock(&fs_info->chunk_mutex);
4529                 wait_for_alloc = 0;
4530                 cond_resched();
4531                 goto again;
4532         }
4533
4534         trans->allocating_chunk = true;
4535
4536         /*
4537          * If we have mixed data/metadata chunks we want to make sure we keep
4538          * allocating mixed chunks instead of individual chunks.
4539          */
4540         if (btrfs_mixed_space_info(space_info))
4541                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4542
4543         /*
4544          * if we're doing a data chunk, go ahead and make sure that
4545          * we keep a reasonable number of metadata chunks allocated in the
4546          * FS as well.
4547          */
4548         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4549                 fs_info->data_chunk_allocations++;
4550                 if (!(fs_info->data_chunk_allocations %
4551                       fs_info->metadata_ratio))
4552                         force_metadata_allocation(fs_info);
4553         }
4554
4555         /*
4556          * Check if we have enough space in SYSTEM chunk because we may need
4557          * to update devices.
4558          */
4559         check_system_chunk(trans, extent_root, flags);
4560
4561         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4562         trans->allocating_chunk = false;
4563
4564         spin_lock(&space_info->lock);
4565         if (ret < 0 && ret != -ENOSPC)
4566                 goto out;
4567         if (ret)
4568                 space_info->full = 1;
4569         else
4570                 ret = 1;
4571
4572         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4573 out:
4574         space_info->chunk_alloc = 0;
4575         spin_unlock(&space_info->lock);
4576         mutex_unlock(&fs_info->chunk_mutex);
4577         /*
4578          * When we allocate a new chunk we reserve space in the chunk block
4579          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4580          * add new nodes/leafs to it if we end up needing to do it when
4581          * inserting the chunk item and updating device items as part of the
4582          * second phase of chunk allocation, performed by
4583          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4584          * large number of new block groups to create in our transaction
4585          * handle's new_bgs list to avoid exhausting the chunk block reserve
4586          * in extreme cases - like having a single transaction create many new
4587          * block groups when starting to write out the free space caches of all
4588          * the block groups that were made dirty during the lifetime of the
4589          * transaction.
4590          */
4591         if (trans->can_flush_pending_bgs &&
4592             trans->chunk_bytes_reserved >= (u64)SZ_2M) {
4593                 btrfs_create_pending_block_groups(trans, extent_root);
4594                 btrfs_trans_release_chunk_metadata(trans);
4595         }
4596         return ret;
4597 }
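
/*
 * A sketch of how callers in this file consume the convention documented
 * above:
 *
 *	ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0 && ret != -ENOSPC)
 *		return ret;
 *
 * -ENOSPC from a non-forced call is usually not treated as fatal; see
 * flush_space(), which folds both 1 and -ENOSPC to 0 and moves on to the
 * next reclaim state, and btrfs_alloc_data_chunk_ondemand(), which falls
 * back to committing the transaction.
 */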
4598
4599 static int can_overcommit(struct btrfs_root *root,
4600                           struct btrfs_space_info *space_info, u64 bytes,
4601                           enum btrfs_reserve_flush_enum flush)
4602 {
4603         struct btrfs_block_rsv *global_rsv;
4604         u64 profile;
4605         u64 space_size;
4606         u64 avail;
4607         u64 used;
4608
4609         /* Don't overcommit when in mixed mode. */
4610         if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
4611                 return 0;
4612
4613         BUG_ON(root->fs_info == NULL);
4614         global_rsv = &root->fs_info->global_block_rsv;
4615         profile = btrfs_get_alloc_profile(root, 0);
4616         used = space_info->bytes_used + space_info->bytes_reserved +
4617                 space_info->bytes_pinned + space_info->bytes_readonly;
4618
4619         /*
4620          * We only want to allow over committing if we have lots of actual space
4621          * free, but if we don't have enough space to handle the global reserve
4622          * space then we could end up having a real enospc problem when trying
4623          * to allocate a chunk or some other such important allocation.
4624          */
4625         spin_lock(&global_rsv->lock);
4626         space_size = calc_global_rsv_need_space(global_rsv);
4627         spin_unlock(&global_rsv->lock);
4628         if (used + space_size >= space_info->total_bytes)
4629                 return 0;
4630
4631         used += space_info->bytes_may_use;
4632
4633         spin_lock(&root->fs_info->free_chunk_lock);
4634         avail = root->fs_info->free_chunk_space;
4635         spin_unlock(&root->fs_info->free_chunk_lock);
4636
4637         /*
4638          * If we have dup, raid1 or raid10 then only half of the free
4639          * space is actually useable.  For raid56, the space info used
4640          * doesn't include the parity drive, so we don't have to
4641          * change the math
4642          */
4643         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4644                        BTRFS_BLOCK_GROUP_RAID1 |
4645                        BTRFS_BLOCK_GROUP_RAID10))
4646                 avail >>= 1;
4647
4648         /*
4649          * If we aren't flushing all things, let us overcommit up to
4650          * half of the space. If we can flush, don't let us overcommit
4651          * too much; let it overcommit up to 1/8 of the space.
4652          */
4653         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4654                 avail >>= 3;
4655         else
4656                 avail >>= 1;
4657
4658         if (used + bytes < space_info->total_bytes + avail)
4659                 return 1;
4660         return 0;
4661 }
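
/*
 * Worked numbers for the throttling above, assuming a hypothetical metadata
 * space_info on a single profile with 8GiB of unallocated device space
 * (avail == 8GiB): a BTRFS_RESERVE_FLUSH_ALL caller may overcommit by up to
 * 1GiB (avail >> 3), any other flush mode by up to 4GiB (avail >> 1).  With
 * DUP, RAID1 or RAID10 avail is halved first, since each byte costs two on
 * disk.
 */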
4662
4663 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4664                                          unsigned long nr_pages, int nr_items)
4665 {
4666         struct super_block *sb = root->fs_info->sb;
4667
4668         if (down_read_trylock(&sb->s_umount)) {
4669                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4670                 up_read(&sb->s_umount);
4671         } else {
4672                 /*
4673                  * We needn't worry about the filesystem going from r/w to r/o
4674                  * even though we don't acquire the ->s_umount mutex, because the
4675                  * filesystem should guarantee that the delalloc inode list is
4676                  * empty once the filesystem is read-only (all dirty pages have
4677                  * been written to disk).
4678                  */
4679                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4680                 if (!current->journal_info)
4681                         btrfs_wait_ordered_roots(root->fs_info, nr_items,
4682                                                  0, (u64)-1);
4683         }
4684 }
4685
4686 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4687 {
4688         u64 bytes;
4689         int nr;
4690
4691         bytes = btrfs_calc_trans_metadata_size(root, 1);
4692         nr = (int)div64_u64(to_reclaim, bytes);
4693         if (!nr)
4694                 nr = 1;
4695         return nr;
4696 }
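
/*
 * Example, assuming btrfs_calc_trans_metadata_size(root, 1) comes to 256KiB
 * (e.g. a 16KiB nodesize with the usual nodesize * BTRFS_MAX_LEVEL * 2
 * worst-case cost): asking to reclaim 1MiB yields nr == 4 items, and
 * anything smaller than one item's worth is rounded up to a single item.
 */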
4697
4698 #define EXTENT_SIZE_PER_ITEM    SZ_256K
4699
4700 /*
4701  * shrink metadata reservation for delalloc
4702  */
4703 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4704                             bool wait_ordered)
4705 {
4706         struct btrfs_block_rsv *block_rsv;
4707         struct btrfs_space_info *space_info;
4708         struct btrfs_trans_handle *trans;
4709         u64 delalloc_bytes;
4710         u64 max_reclaim;
4711         long time_left;
4712         unsigned long nr_pages;
4713         int loops;
4714         int items;
4715         enum btrfs_reserve_flush_enum flush;
4716
4717         /* Calc the number of items we need to flush for the space reservation */
4718         items = calc_reclaim_items_nr(root, to_reclaim);
4719         to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
4720
4721         trans = (struct btrfs_trans_handle *)current->journal_info;
4722         block_rsv = &root->fs_info->delalloc_block_rsv;
4723         space_info = block_rsv->space_info;
4724
4725         delalloc_bytes = percpu_counter_sum_positive(
4726                                                 &root->fs_info->delalloc_bytes);
4727         if (delalloc_bytes == 0) {
4728                 if (trans)
4729                         return;
4730                 if (wait_ordered)
4731                         btrfs_wait_ordered_roots(root->fs_info, items,
4732                                                  0, (u64)-1);
4733                 return;
4734         }
4735
4736         loops = 0;
4737         while (delalloc_bytes && loops < 3) {
4738                 max_reclaim = min(delalloc_bytes, to_reclaim);
4739                 nr_pages = max_reclaim >> PAGE_SHIFT;
4740                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4741                 /*
4742                  * We need to wait for the async pages to actually start before
4743                  * we do anything.
4744                  */
4745                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4746                 if (!max_reclaim)
4747                         goto skip_async;
4748
4749                 if (max_reclaim <= nr_pages)
4750                         max_reclaim = 0;
4751                 else
4752                         max_reclaim -= nr_pages;
4753
4754                 wait_event(root->fs_info->async_submit_wait,
4755                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4756                            (int)max_reclaim);
4757 skip_async:
4758                 if (!trans)
4759                         flush = BTRFS_RESERVE_FLUSH_ALL;
4760                 else
4761                         flush = BTRFS_RESERVE_NO_FLUSH;
4762                 spin_lock(&space_info->lock);
4763                 if (list_empty(&space_info->tickets) &&
4764                     list_empty(&space_info->priority_tickets)) {
4765                         spin_unlock(&space_info->lock);
4766                         break;
4767                 }
4768                 spin_unlock(&space_info->lock);
4769
4770                 loops++;
4771                 if (wait_ordered && !trans) {
4772                         btrfs_wait_ordered_roots(root->fs_info, items,
4773                                                  0, (u64)-1);
4774                 } else {
4775                         time_left = schedule_timeout_killable(1);
4776                         if (time_left)
4777                                 break;
4778                 }
4779                 delalloc_bytes = percpu_counter_sum_positive(
4780                                                 &root->fs_info->delalloc_bytes);
4781         }
4782 }
4783
4784 /**
4785  * may_commit_transaction - possibly commit the transaction if it's OK to
4786  * @root - the root we're allocating for
4787  * @bytes - the number of bytes we want to reserve
4788  * @force - force the commit
4789  *
4790  * This will check to make sure that committing the transaction will actually
4791  * get us somewhere and then commit the transaction if it does.  Otherwise it
4792  * will return -ENOSPC.
4793  */
4794 static int may_commit_transaction(struct btrfs_root *root,
4795                                   struct btrfs_space_info *space_info,
4796                                   u64 bytes, int force)
4797 {
4798         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4799         struct btrfs_trans_handle *trans;
4800
4801         trans = (struct btrfs_trans_handle *)current->journal_info;
4802         if (trans)
4803                 return -EAGAIN;
4804
4805         if (force)
4806                 goto commit;
4807
4808         /* See if there is enough pinned space to make this reservation */
4809         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4810                                    bytes) >= 0)
4811                 goto commit;
4812
4813         /*
4814          * See if there is some space in the delayed insertion reservation for
4815          * this reservation.
4816          */
4817         if (space_info != delayed_rsv->space_info)
4818                 return -ENOSPC;
4819
4820         spin_lock(&delayed_rsv->lock);
4821         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4822                                    bytes - delayed_rsv->size) < 0) {
4823                 spin_unlock(&delayed_rsv->lock);
4824                 return -ENOSPC;
4825         }
4826         spin_unlock(&delayed_rsv->lock);
4827
4828 commit:
4829         trans = btrfs_join_transaction(root);
4830         if (IS_ERR(trans))
4831                 return -ENOSPC;
4832
4833         return btrfs_commit_transaction(trans, root);
4834 }
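
/*
 * Decision sketch for the delayed-rsv check above: with bytes == 1MiB,
 * delayed_rsv->size == 256KiB and 800KiB pinned, the pinned counter is
 * compared against 1MiB - 256KiB == 768KiB; 800KiB covers that, so the
 * commit proceeds because pinned space plus the delayed rsv can satisfy the
 * reservation.  With only 512KiB pinned the compare is negative and -ENOSPC
 * is returned without committing.
 */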
4835
4836 struct reserve_ticket {
4837         u64 bytes;
4838         int error;
4839         struct list_head list;
4840         wait_queue_head_t wait;
4841 };
4842
4843 static int flush_space(struct btrfs_root *root,
4844                        struct btrfs_space_info *space_info, u64 num_bytes,
4845                        u64 orig_bytes, int state)
4846 {
4847         struct btrfs_trans_handle *trans;
4848         int nr;
4849         int ret = 0;
4850
4851         switch (state) {
4852         case FLUSH_DELAYED_ITEMS_NR:
4853         case FLUSH_DELAYED_ITEMS:
4854                 if (state == FLUSH_DELAYED_ITEMS_NR)
4855                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4856                 else
4857                         nr = -1;
4858
4859                 trans = btrfs_join_transaction(root);
4860                 if (IS_ERR(trans)) {
4861                         ret = PTR_ERR(trans);
4862                         break;
4863                 }
4864                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4865                 btrfs_end_transaction(trans, root);
4866                 break;
4867         case FLUSH_DELALLOC:
4868         case FLUSH_DELALLOC_WAIT:
4869                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4870                                 state == FLUSH_DELALLOC_WAIT);
4871                 break;
4872         case ALLOC_CHUNK:
4873                 trans = btrfs_join_transaction(root);
4874                 if (IS_ERR(trans)) {
4875                         ret = PTR_ERR(trans);
4876                         break;
4877                 }
4878                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4879                                      btrfs_get_alloc_profile(root, 0),
4880                                      CHUNK_ALLOC_NO_FORCE);
4881                 btrfs_end_transaction(trans, root);
4882                 if (ret > 0 || ret == -ENOSPC)
4883                         ret = 0;
4884                 break;
4885         case COMMIT_TRANS:
4886                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4887                 break;
4888         default:
4889                 ret = -ENOSPC;
4890                 break;
4891         }
4892
4893         trace_btrfs_flush_space(root->fs_info, space_info->flags, num_bytes,
4894                                 orig_bytes, state, ret);
4895         return ret;
4896 }
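
/*
 * The reclaim machinery below walks these states in escalating order of
 * cost: FLUSH_DELAYED_ITEMS_NR, FLUSH_DELAYED_ITEMS, FLUSH_DELALLOC,
 * FLUSH_DELALLOC_WAIT, ALLOC_CHUNK and finally COMMIT_TRANS, bumping
 * flush_state by one each time a pass fails to satisfy the outstanding
 * tickets.
 */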
4897
4898 static inline u64
4899 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4900                                  struct btrfs_space_info *space_info)
4901 {
4902         struct reserve_ticket *ticket;
4903         u64 used;
4904         u64 expected;
4905         u64 to_reclaim = 0;
4906
4907         list_for_each_entry(ticket, &space_info->tickets, list)
4908                 to_reclaim += ticket->bytes;
4909         list_for_each_entry(ticket, &space_info->priority_tickets, list)
4910                 to_reclaim += ticket->bytes;
4911         if (to_reclaim)
4912                 return to_reclaim;
4913
4914         to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4915         if (can_overcommit(root, space_info, to_reclaim,
4916                            BTRFS_RESERVE_FLUSH_ALL))
4917                 return 0;
4918
4919         used = space_info->bytes_used + space_info->bytes_reserved +
4920                space_info->bytes_pinned + space_info->bytes_readonly +
4921                space_info->bytes_may_use;
4922         if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
4923                 expected = div_factor_fine(space_info->total_bytes, 95);
4924         else
4925                 expected = div_factor_fine(space_info->total_bytes, 90);
4926
4927         if (used > expected)
4928                 to_reclaim = used - expected;
4929         else
4930                 to_reclaim = 0;
4931         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4932                                      space_info->bytes_reserved);
4933         return to_reclaim;
4934 }
4935
4936 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4937                                         struct btrfs_root *root, u64 used)
4938 {
4939         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4940
4941         /* If we're just plain full then async reclaim just slows us down. */
4942         if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
4943                 return 0;
4944
4945         if (!btrfs_calc_reclaim_metadata_size(root, space_info))
4946                 return 0;
4947
4948         return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
4949                 !test_bit(BTRFS_FS_STATE_REMOUNTING,
4950                           &root->fs_info->fs_state));
4951 }
4952
4953 static void wake_all_tickets(struct list_head *head)
4954 {
4955         struct reserve_ticket *ticket;
4956
4957         while (!list_empty(head)) {
4958                 ticket = list_first_entry(head, struct reserve_ticket, list);
4959                 list_del_init(&ticket->list);
4960                 ticket->error = -ENOSPC;
4961                 wake_up(&ticket->wait);
4962         }
4963 }
4964
4965 /*
4966  * This is for normal flushers; we can wait all goddamned day if we want to.  We
4967  * will loop and continuously try to flush as long as we are making progress.
4968  * We count progress as clearing off tickets each time we have to loop.
4969  */
4970 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4971 {
4972         struct btrfs_fs_info *fs_info;
4973         struct btrfs_space_info *space_info;
4974         u64 to_reclaim;
4975         int flush_state;
4976         int commit_cycles = 0;
4977         u64 last_tickets_id;
4978
4979         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4980         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4981
4982         spin_lock(&space_info->lock);
4983         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4984                                                       space_info);
4985         if (!to_reclaim) {
4986                 space_info->flush = 0;
4987                 spin_unlock(&space_info->lock);
4988                 return;
4989         }
4990         last_tickets_id = space_info->tickets_id;
4991         spin_unlock(&space_info->lock);
4992
4993         flush_state = FLUSH_DELAYED_ITEMS_NR;
4994         do {
4995                 struct reserve_ticket *ticket;
4996                 int ret;
4997
4998                 ret = flush_space(fs_info->fs_root, space_info, to_reclaim,
4999                             to_reclaim, flush_state);
5000                 spin_lock(&space_info->lock);
5001                 if (list_empty(&space_info->tickets)) {
5002                         space_info->flush = 0;
5003                         spin_unlock(&space_info->lock);
5004                         return;
5005                 }
5006                 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
5007                                                               space_info);
5008                 ticket = list_first_entry(&space_info->tickets,
5009                                           struct reserve_ticket, list);
5010                 if (last_tickets_id == space_info->tickets_id) {
5011                         flush_state++;
5012                 } else {
5013                         last_tickets_id = space_info->tickets_id;
5014                         flush_state = FLUSH_DELAYED_ITEMS_NR;
5015                         if (commit_cycles)
5016                                 commit_cycles--;
5017                 }
5018
5019                 if (flush_state > COMMIT_TRANS) {
5020                         commit_cycles++;
5021                         if (commit_cycles > 2) {
5022                                 wake_all_tickets(&space_info->tickets);
5023                                 space_info->flush = 0;
5024                         } else {
5025                                 flush_state = FLUSH_DELAYED_ITEMS_NR;
5026                         }
5027                 }
5028                 spin_unlock(&space_info->lock);
5029         } while (flush_state <= COMMIT_TRANS);
5030 }
5031
5032 void btrfs_init_async_reclaim_work(struct work_struct *work)
5033 {
5034         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
5035 }
5036
5037 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5038                                             struct btrfs_space_info *space_info,
5039                                             struct reserve_ticket *ticket)
5040 {
5041         u64 to_reclaim;
5042         int flush_state = FLUSH_DELAYED_ITEMS_NR;
5043
5044         spin_lock(&space_info->lock);
5045         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
5046                                                       space_info);
5047         if (!to_reclaim) {
5048                 spin_unlock(&space_info->lock);
5049                 return;
5050         }
5051         spin_unlock(&space_info->lock);
5052
5053         do {
5054                 flush_space(fs_info->fs_root, space_info, to_reclaim,
5055                             to_reclaim, flush_state);
5056                 flush_state++;
5057                 spin_lock(&space_info->lock);
5058                 if (ticket->bytes == 0) {
5059                         spin_unlock(&space_info->lock);
5060                         return;
5061                 }
5062                 spin_unlock(&space_info->lock);
5063
5064                 /*
5065                  * Priority flushers can't wait on delalloc without
5066                  * deadlocking.
5067                  */
5068                 if (flush_state == FLUSH_DELALLOC ||
5069                     flush_state == FLUSH_DELALLOC_WAIT)
5070                         flush_state = ALLOC_CHUNK;
5071         } while (flush_state < COMMIT_TRANS);
5072 }
5073
5074 static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
5075                                struct btrfs_space_info *space_info,
5076                                struct reserve_ticket *ticket, u64 orig_bytes)
5077
5078 {
5079         DEFINE_WAIT(wait);
5080         int ret = 0;
5081
5082         spin_lock(&space_info->lock);
5083         while (ticket->bytes > 0 && ticket->error == 0) {
5084                 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
5085                 if (ret) {
5086                         ret = -EINTR;
5087                         break;
5088                 }
5089                 spin_unlock(&space_info->lock);
5090
5091                 schedule();
5092
5093                 finish_wait(&ticket->wait, &wait);
5094                 spin_lock(&space_info->lock);
5095         }
5096         if (!ret)
5097                 ret = ticket->error;
5098         if (!list_empty(&ticket->list))
5099                 list_del_init(&ticket->list);
5100         if (ticket->bytes && ticket->bytes < orig_bytes) {
5101                 u64 num_bytes = orig_bytes - ticket->bytes;
5102                 space_info->bytes_may_use -= num_bytes;
5103                 trace_btrfs_space_reservation(fs_info, "space_info",
5104                                               space_info->flags, num_bytes, 0);
5105         }
5106         spin_unlock(&space_info->lock);
5107
5108         return ret;
5109 }
5110
5111 /**
5112  * __reserve_metadata_bytes - try to reserve bytes from the given space_info
5113  * @root - the root we're allocating for
5114  * @space_info - the space info we want to allocate from
5115  * @orig_bytes - the number of bytes we want
5116  * @flush - whether or not we can flush to make our reservation
5117  *
5118  * This will reserve orig_bytes number of bytes from the space info associated
5119  * with the block_rsv.  If there is not enough space it will make an attempt to
5120  * flush out space to make room.  It will do this by flushing delalloc if
5121  * possible or committing the transaction.  If flush is 0 then no attempts to
5122  * regain reservations will be made and this will fail if there is not enough
5123  * space already.
5124  */
5125 static int __reserve_metadata_bytes(struct btrfs_root *root,
5126                                     struct btrfs_space_info *space_info,
5127                                     u64 orig_bytes,
5128                                     enum btrfs_reserve_flush_enum flush)
5129 {
5130         struct reserve_ticket ticket;
5131         u64 used;
5132         int ret = 0;
5133
5134         ASSERT(orig_bytes);
5135         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
5136
5137         spin_lock(&space_info->lock);
5138         ret = -ENOSPC;
5139         used = space_info->bytes_used + space_info->bytes_reserved +
5140                 space_info->bytes_pinned + space_info->bytes_readonly +
5141                 space_info->bytes_may_use;
5142
5143         /*
5144          * If we have enough space then hooray, make our reservation and carry
5145          * on.  If not, see if we can overcommit, and if we can, hooray, carry on.
5146          * If not, things get more complicated.
5147          */
5148         if (used + orig_bytes <= space_info->total_bytes) {
5149                 space_info->bytes_may_use += orig_bytes;
5150                 trace_btrfs_space_reservation(root->fs_info, "space_info",
5151                                               space_info->flags, orig_bytes,
5152                                               1);
5153                 ret = 0;
5154         } else if (can_overcommit(root, space_info, orig_bytes, flush)) {
5155                 space_info->bytes_may_use += orig_bytes;
5156                 trace_btrfs_space_reservation(root->fs_info, "space_info",
5157                                               space_info->flags, orig_bytes,
5158                                               1);
5159                 ret = 0;
5160         }
5161
5162         /*
5163          * If we couldn't make a reservation then setup our reservation ticket
5164          * and kick the async worker if it's not already running.
5165          *
5166          * If we are a priority flusher then we just need to add our ticket to
5167          * the list and we will do our own flushing further down.
5168          */
5169         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5170                 ticket.bytes = orig_bytes;
5171                 ticket.error = 0;
5172                 init_waitqueue_head(&ticket.wait);
5173                 if (flush == BTRFS_RESERVE_FLUSH_ALL) {
5174                         list_add_tail(&ticket.list, &space_info->tickets);
5175                         if (!space_info->flush) {
5176                                 space_info->flush = 1;
5177                                 trace_btrfs_trigger_flush(root->fs_info,
5178                                                           space_info->flags,
5179                                                           orig_bytes, flush,
5180                                                           "enospc");
5181                                 queue_work(system_unbound_wq,
5182                                            &root->fs_info->async_reclaim_work);
5183                         }
5184                 } else {
5185                         list_add_tail(&ticket.list,
5186                                       &space_info->priority_tickets);
5187                 }
5188         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
5189                 used += orig_bytes;
5190                 /*
5191                  * We will do the space reservation dance during log replay,
5192                  * which means we won't have fs_info->fs_root set, so don't do
5193                  * the async reclaim as we will panic.
5194                  */
5195                 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags) &&
5196                     need_do_async_reclaim(space_info, root, used) &&
5197                     !work_busy(&root->fs_info->async_reclaim_work)) {
5198                         trace_btrfs_trigger_flush(root->fs_info,
5199                                                   space_info->flags,
5200                                                   orig_bytes, flush,
5201                                                   "preempt");
5202                         queue_work(system_unbound_wq,
5203                                    &root->fs_info->async_reclaim_work);
5204                 }
5205         }
5206         spin_unlock(&space_info->lock);
5207         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
5208                 return ret;
5209
5210         if (flush == BTRFS_RESERVE_FLUSH_ALL)
5211                 return wait_reserve_ticket(root->fs_info, space_info, &ticket,
5212                                            orig_bytes);
5213
5214         ret = 0;
5215         priority_reclaim_metadata_space(root->fs_info, space_info, &ticket);
5216         spin_lock(&space_info->lock);
5217         if (ticket.bytes) {
5218                 if (ticket.bytes < orig_bytes) {
5219                         u64 num_bytes = orig_bytes - ticket.bytes;
5220                         space_info->bytes_may_use -= num_bytes;
5221                         trace_btrfs_space_reservation(root->fs_info,
5222                                         "space_info", space_info->flags,
5223                                         num_bytes, 0);
5224
5225                 }
5226                 list_del_init(&ticket.list);
5227                 ret = -ENOSPC;
5228         }
5229         spin_unlock(&space_info->lock);
5230         ASSERT(list_empty(&ticket.list));
5231         return ret;
5232 }
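
/*
 * Ticket lifecycle sketch: a BTRFS_RESERVE_FLUSH_ALL reservation that cannot
 * be satisfied immediately queues a ticket on space_info->tickets and sleeps
 * in wait_reserve_ticket(); the async reclaim worker then flushes space and
 * either zeroes ticket->bytes on success or sets ticket->error to -ENOSPC
 * when it gives up.  BTRFS_RESERVE_FLUSH_LIMIT callers queue on the priority
 * list instead and flush synchronously via
 * priority_reclaim_metadata_space().
 */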
5233
5234 /**
5235  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
5236  * @root - the root we're allocating for
5237  * @block_rsv - the block_rsv we're allocating for
5238  * @orig_bytes - the number of bytes we want
5239  * @flush - whether or not we can flush to make our reservation
5240  *
5241  * This will reserve orig_bytes number of bytes from the space info associated
5242  * with the block_rsv.  If there is not enough space it will make an attempt to
5243  * flush out space to make room.  It will do this by flushing delalloc if
5244  * possible or committing the transaction.  If flush is 0 then no attempts to
5245  * regain reservations will be made and this will fail if there is not enough
5246  * space already.
5247  */
5248 static int reserve_metadata_bytes(struct btrfs_root *root,
5249                                   struct btrfs_block_rsv *block_rsv,
5250                                   u64 orig_bytes,
5251                                   enum btrfs_reserve_flush_enum flush)
5252 {
5253         int ret;
5254
5255         ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
5256                                        flush);
5257         if (ret == -ENOSPC &&
5258             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5259                 struct btrfs_block_rsv *global_rsv =
5260                         &root->fs_info->global_block_rsv;
5261
5262                 if (block_rsv != global_rsv &&
5263                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5264                         ret = 0;
5265         }
5266         if (ret == -ENOSPC)
5267                 trace_btrfs_space_reservation(root->fs_info,
5268                                               "space_info:enospc",
5269                                               block_rsv->space_info->flags,
5270                                               orig_bytes, 1);
5271         return ret;
5272 }
5273
5274 static struct btrfs_block_rsv *get_block_rsv(
5275                                         const struct btrfs_trans_handle *trans,
5276                                         const struct btrfs_root *root)
5277 {
5278         struct btrfs_block_rsv *block_rsv = NULL;
5279
5280         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5281             (root == root->fs_info->csum_root && trans->adding_csums) ||
5282              (root == root->fs_info->uuid_root))
5283                 block_rsv = trans->block_rsv;
5284
5285         if (!block_rsv)
5286                 block_rsv = root->block_rsv;
5287
5288         if (!block_rsv)
5289                 block_rsv = &root->fs_info->empty_block_rsv;
5290
5291         return block_rsv;
5292 }
5293
5294 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5295                                u64 num_bytes)
5296 {
5297         int ret = -ENOSPC;
5298         spin_lock(&block_rsv->lock);
5299         if (block_rsv->reserved >= num_bytes) {
5300                 block_rsv->reserved -= num_bytes;
5301                 if (block_rsv->reserved < block_rsv->size)
5302                         block_rsv->full = 0;
5303                 ret = 0;
5304         }
5305         spin_unlock(&block_rsv->lock);
5306         return ret;
5307 }
5308
5309 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5310                                 u64 num_bytes, int update_size)
5311 {
5312         spin_lock(&block_rsv->lock);
5313         block_rsv->reserved += num_bytes;
5314         if (update_size)
5315                 block_rsv->size += num_bytes;
5316         else if (block_rsv->reserved >= block_rsv->size)
5317                 block_rsv->full = 1;
5318         spin_unlock(&block_rsv->lock);
5319 }
5320
5321 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5322                              struct btrfs_block_rsv *dest, u64 num_bytes,
5323                              int min_factor)
5324 {
5325         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5326         u64 min_bytes;
5327
5328         if (global_rsv->space_info != dest->space_info)
5329                 return -ENOSPC;
5330
5331         spin_lock(&global_rsv->lock);
5332         min_bytes = div_factor(global_rsv->size, min_factor);
5333         if (global_rsv->reserved < min_bytes + num_bytes) {
5334                 spin_unlock(&global_rsv->lock);
5335                 return -ENOSPC;
5336         }
5337         global_rsv->reserved -= num_bytes;
5338         if (global_rsv->reserved < global_rsv->size)
5339                 global_rsv->full = 0;
5340         spin_unlock(&global_rsv->lock);
5341
5342         block_rsv_add_bytes(dest, num_bytes, 1);
5343         return 0;
5344 }
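
/*
 * Worked example (values assumed for illustration): with min_factor == 5 and
 * a global rsv sized at 100M, div_factor() above yields min_bytes == 50M, so
 * a request to migrate 10M succeeds only while global_rsv->reserved is at
 * least 60M; otherwise -ENOSPC is returned and the global rsv is untouched.
 */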
5345
5346 /*
5347  * This is for space we already have accounted in space_info->bytes_may_use, so
5348  * basically when we're returning space from block_rsv's.
5349  * basically when we're returning space from block_rsvs.
5350 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
5351                                      struct btrfs_space_info *space_info,
5352                                      u64 num_bytes)
5353 {
5354         struct reserve_ticket *ticket;
5355         struct list_head *head;
5356         u64 used;
5357         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
5358         bool check_overcommit = false;
5359
5360         spin_lock(&space_info->lock);
5361         head = &space_info->priority_tickets;
5362
5363         /*
5364          * If we are over our limit then we need to check and see if we can
5365          * overcommit, and if we can't then we just need to free up our space
5366          * and not satisfy any requests.
5367          */
5368         used = space_info->bytes_used + space_info->bytes_reserved +
5369                 space_info->bytes_pinned + space_info->bytes_readonly +
5370                 space_info->bytes_may_use;
5371         if (used - num_bytes >= space_info->total_bytes)
5372                 check_overcommit = true;
5373 again:
5374         while (!list_empty(head) && num_bytes) {
5375                 ticket = list_first_entry(head, struct reserve_ticket,
5376                                           list);
5377                 /*
5378                  * We use 0 bytes because this space is already reserved, so
5379                  * adding the ticket space would be a double count.
5380                  */
5381                 if (check_overcommit &&
5382                     !can_overcommit(fs_info->extent_root, space_info, 0,
5383                                     flush))
5384                         break;
5385                 if (num_bytes >= ticket->bytes) {
5386                         list_del_init(&ticket->list);
5387                         num_bytes -= ticket->bytes;
5388                         ticket->bytes = 0;
5389                         space_info->tickets_id++;
5390                         wake_up(&ticket->wait);
5391                 } else {
5392                         ticket->bytes -= num_bytes;
5393                         num_bytes = 0;
5394                 }
5395         }
5396
5397         if (num_bytes && head == &space_info->priority_tickets) {
5398                 head = &space_info->tickets;
5399                 flush = BTRFS_RESERVE_FLUSH_ALL;
5400                 goto again;
5401         }
5402         space_info->bytes_may_use -= num_bytes;
5403         trace_btrfs_space_reservation(fs_info, "space_info",
5404                                       space_info->flags, num_bytes, 0);
5405         spin_unlock(&space_info->lock);
5406 }
5407
5408 /*
5409  * This is for newly allocated space that isn't accounted in
5410  * space_info->bytes_may_use yet.  So if we allocate a chunk or unpin an extent
5411  * we use this helper.
5412  */
5413 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
5414                                      struct btrfs_space_info *space_info,
5415                                      u64 num_bytes)
5416 {
5417         struct reserve_ticket *ticket;
5418         struct list_head *head = &space_info->priority_tickets;
5419
5420 again:
5421         while (!list_empty(head) && num_bytes) {
5422                 ticket = list_first_entry(head, struct reserve_ticket,
5423                                           list);
5424                 if (num_bytes >= ticket->bytes) {
5425                         trace_btrfs_space_reservation(fs_info, "space_info",
5426                                                       space_info->flags,
5427                                                       ticket->bytes, 1);
5428                         list_del_init(&ticket->list);
5429                         num_bytes -= ticket->bytes;
5430                         space_info->bytes_may_use += ticket->bytes;
5431                         ticket->bytes = 0;
5432                         space_info->tickets_id++;
5433                         wake_up(&ticket->wait);
5434                 } else {
5435                         trace_btrfs_space_reservation(fs_info, "space_info",
5436                                                       space_info->flags,
5437                                                       num_bytes, 1);
5438                         space_info->bytes_may_use += num_bytes;
5439                         ticket->bytes -= num_bytes;
5440                         num_bytes = 0;
5441                 }
5442         }
5443
5444         if (num_bytes && head == &space_info->priority_tickets) {
5445                 head = &space_info->tickets;
5446                 goto again;
5447         }
5448 }
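
/*
 * Note: both ticket helpers above drain the priority ticket list before the
 * ordinary one, so priority tickets (reservations that cannot do a full
 * flush) are satisfied ahead of queued BTRFS_RESERVE_FLUSH_ALL waiters.
 */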
5449
5450 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5451                                     struct btrfs_block_rsv *block_rsv,
5452                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5453 {
5454         struct btrfs_space_info *space_info = block_rsv->space_info;
5455
5456         spin_lock(&block_rsv->lock);
5457         if (num_bytes == (u64)-1)
5458                 num_bytes = block_rsv->size;
5459         block_rsv->size -= num_bytes;
5460         if (block_rsv->reserved >= block_rsv->size) {
5461                 num_bytes = block_rsv->reserved - block_rsv->size;
5462                 block_rsv->reserved = block_rsv->size;
5463                 block_rsv->full = 1;
5464         } else {
5465                 num_bytes = 0;
5466         }
5467         spin_unlock(&block_rsv->lock);
5468
5469         if (num_bytes > 0) {
5470                 if (dest) {
5471                         spin_lock(&dest->lock);
5472                         if (!dest->full) {
5473                                 u64 bytes_to_add;
5474
5475                                 bytes_to_add = dest->size - dest->reserved;
5476                                 bytes_to_add = min(num_bytes, bytes_to_add);
5477                                 dest->reserved += bytes_to_add;
5478                                 if (dest->reserved >= dest->size)
5479                                         dest->full = 1;
5480                                 num_bytes -= bytes_to_add;
5481                         }
5482                         spin_unlock(&dest->lock);
5483                 }
5484                 if (num_bytes)
5485                         space_info_add_old_bytes(fs_info, space_info,
5486                                                  num_bytes);
5487         }
5488 }
5489
5490 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
5491                             struct btrfs_block_rsv *dst, u64 num_bytes,
5492                             int update_size)
5493 {
5494         int ret;
5495
5496         ret = block_rsv_use_bytes(src, num_bytes);
5497         if (ret)
5498                 return ret;
5499
5500         block_rsv_add_bytes(dst, num_bytes, update_size);
5501         return 0;
5502 }
5503
5504 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5505 {
5506         memset(rsv, 0, sizeof(*rsv));
5507         spin_lock_init(&rsv->lock);
5508         rsv->type = type;
5509 }
5510
5511 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5512                                               unsigned short type)
5513 {
5514         struct btrfs_block_rsv *block_rsv;
5515         struct btrfs_fs_info *fs_info = root->fs_info;
5516
5517         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5518         if (!block_rsv)
5519                 return NULL;
5520
5521         btrfs_init_block_rsv(block_rsv, type);
5522         block_rsv->space_info = __find_space_info(fs_info,
5523                                                   BTRFS_BLOCK_GROUP_METADATA);
5524         return block_rsv;
5525 }
5526
5527 void btrfs_free_block_rsv(struct btrfs_root *root,
5528                           struct btrfs_block_rsv *rsv)
5529 {
5530         if (!rsv)
5531                 return;
5532         btrfs_block_rsv_release(root, rsv, (u64)-1);
5533         kfree(rsv);
5534 }
5535
5536 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5537 {
5538         kfree(rsv);
5539 }
5540
5541 int btrfs_block_rsv_add(struct btrfs_root *root,
5542                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5543                         enum btrfs_reserve_flush_enum flush)
5544 {
5545         int ret;
5546
5547         if (num_bytes == 0)
5548                 return 0;
5549
5550         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5551         if (!ret) {
5552                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5553                 return 0;
5554         }
5555
5556         return ret;
5557 }
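
/*
 * Usage sketch (illustrative only, not an actual caller in this file): a
 * caller needing room for two tree items might pair the add/release helpers
 * like so:
 *
 *	u64 bytes = btrfs_calc_trans_metadata_size(root, 2);
 *	int ret = btrfs_block_rsv_add(root, rsv, bytes,
 *				      BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;	(nothing was reserved on failure)
 *	... consume the reservation, then when done ...
 *	btrfs_block_rsv_release(root, rsv, bytes);
 */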
5558
5559 int btrfs_block_rsv_check(struct btrfs_root *root,
5560                           struct btrfs_block_rsv *block_rsv, int min_factor)
5561 {
5562         u64 num_bytes = 0;
5563         int ret = -ENOSPC;
5564
5565         if (!block_rsv)
5566                 return 0;
5567
5568         spin_lock(&block_rsv->lock);
5569         num_bytes = div_factor(block_rsv->size, min_factor);
5570         if (block_rsv->reserved >= num_bytes)
5571                 ret = 0;
5572         spin_unlock(&block_rsv->lock);
5573
5574         return ret;
5575 }
5576
5577 int btrfs_block_rsv_refill(struct btrfs_root *root,
5578                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5579                            enum btrfs_reserve_flush_enum flush)
5580 {
5581         u64 num_bytes = 0;
5582         int ret = -ENOSPC;
5583
5584         if (!block_rsv)
5585                 return 0;
5586
5587         spin_lock(&block_rsv->lock);
5588         num_bytes = min_reserved;
5589         if (block_rsv->reserved >= num_bytes)
5590                 ret = 0;
5591         else
5592                 num_bytes -= block_rsv->reserved;
5593         spin_unlock(&block_rsv->lock);
5594
5595         if (!ret)
5596                 return 0;
5597
5598         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5599         if (!ret) {
5600                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5601                 return 0;
5602         }
5603
5604         return ret;
5605 }
5606
5607 void btrfs_block_rsv_release(struct btrfs_root *root,
5608                              struct btrfs_block_rsv *block_rsv,
5609                              u64 num_bytes)
5610 {
5611         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5612         if (global_rsv == block_rsv ||
5613             block_rsv->space_info != global_rsv->space_info)
5614                 global_rsv = NULL;
5615         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5616                                 num_bytes);
5617 }
5618
5619 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5620 {
5621         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5622         struct btrfs_space_info *sinfo = block_rsv->space_info;
5623         u64 num_bytes;
5624
5625         /*
5626          * The global block rsv is based on the size of the extent tree, the
5627          * checksum tree and the root tree.  If the fs is empty we want to set
5628          * it to a minimal amount for safety.
5629          */
5630         num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
5631                 btrfs_root_used(&fs_info->csum_root->root_item) +
5632                 btrfs_root_used(&fs_info->tree_root->root_item);
5633         num_bytes = max_t(u64, num_bytes, SZ_16M);
5634
5635         spin_lock(&sinfo->lock);
5636         spin_lock(&block_rsv->lock);
5637
5638         block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5639
5640         if (block_rsv->reserved < block_rsv->size) {
5641                 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5642                         sinfo->bytes_reserved + sinfo->bytes_readonly +
5643                         sinfo->bytes_may_use;
5644                 if (sinfo->total_bytes > num_bytes) {
5645                         num_bytes = sinfo->total_bytes - num_bytes;
5646                         num_bytes = min(num_bytes,
5647                                         block_rsv->size - block_rsv->reserved);
5648                         block_rsv->reserved += num_bytes;
5649                         sinfo->bytes_may_use += num_bytes;
5650                         trace_btrfs_space_reservation(fs_info, "space_info",
5651                                                       sinfo->flags, num_bytes,
5652                                                       1);
5653                 }
5654         } else if (block_rsv->reserved > block_rsv->size) {
5655                 num_bytes = block_rsv->reserved - block_rsv->size;
5656                 sinfo->bytes_may_use -= num_bytes;
5657                 trace_btrfs_space_reservation(fs_info, "space_info",
5658                                       sinfo->flags, num_bytes, 0);
5659                 block_rsv->reserved = block_rsv->size;
5660         }
5661
5662         if (block_rsv->reserved == block_rsv->size)
5663                 block_rsv->full = 1;
5664         else
5665                 block_rsv->full = 0;
5666
5667         spin_unlock(&block_rsv->lock);
5668         spin_unlock(&sinfo->lock);
5669 }
5670
5671 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5672 {
5673         struct btrfs_space_info *space_info;
5674
5675         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5676         fs_info->chunk_block_rsv.space_info = space_info;
5677
5678         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5679         fs_info->global_block_rsv.space_info = space_info;
5680         fs_info->delalloc_block_rsv.space_info = space_info;
5681         fs_info->trans_block_rsv.space_info = space_info;
5682         fs_info->empty_block_rsv.space_info = space_info;
5683         fs_info->delayed_block_rsv.space_info = space_info;
5684
5685         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5686         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5687         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5688         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5689         if (fs_info->quota_root)
5690                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5691         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5692
5693         update_global_block_rsv(fs_info);
5694 }
5695
5696 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5697 {
5698         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5699                                 (u64)-1);
5700         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5701         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5702         WARN_ON(fs_info->trans_block_rsv.size > 0);
5703         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5704         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5705         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5706         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5707         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5708 }
5709
5710 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5711                                   struct btrfs_root *root)
5712 {
5713         if (!trans->block_rsv)
5714                 return;
5715
5716         if (!trans->bytes_reserved)
5717                 return;
5718
5719         trace_btrfs_space_reservation(root->fs_info, "transaction",
5720                                       trans->transid, trans->bytes_reserved, 0);
5721         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5722         trans->bytes_reserved = 0;
5723 }
5724
5725 /*
5726  * To be called after all the new block groups attached to the transaction
5727  * handle have been created (btrfs_create_pending_block_groups()).
5728  */
5729 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5730 {
5731         struct btrfs_fs_info *fs_info = trans->fs_info;
5732
5733         if (!trans->chunk_bytes_reserved)
5734                 return;
5735
5736         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5737
5738         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5739                                 trans->chunk_bytes_reserved);
5740         trans->chunk_bytes_reserved = 0;
5741 }
5742
5743 /* Can only return 0 or -ENOSPC */
5744 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5745                                   struct inode *inode)
5746 {
5747         struct btrfs_root *root = BTRFS_I(inode)->root;
5748         /*
5749          * We always use trans->block_rsv here as we will have reserved space
5750          * for our orphan when starting the transaction; using get_block_rsv()
5751          * here would sometimes make us choose the wrong block rsv as we could be
5752          * doing a reloc inode for a non-refcounted root.
5753          */
5754         struct btrfs_block_rsv *src_rsv = trans->block_rsv;
5755         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5756
5757         /*
5758          * We need to hold space in order to delete our orphan item once we've
5759          * added it, so this takes the reservation so we can release it later
5760          * when we are truly done with the orphan item.
5761          */
5762         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5763         trace_btrfs_space_reservation(root->fs_info, "orphan",
5764                                       btrfs_ino(inode), num_bytes, 1);
5765         return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
5766 }
5767
5768 void btrfs_orphan_release_metadata(struct inode *inode)
5769 {
5770         struct btrfs_root *root = BTRFS_I(inode)->root;
5771         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5772         trace_btrfs_space_reservation(root->fs_info, "orphan",
5773                                       btrfs_ino(inode), num_bytes, 0);
5774         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5775 }
5776
5777 /*
5778  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5779  * root: the root of the parent directory
5780  * rsv: block reservation
5781  * items: the number of items that we need to reserve space for
5782  * qgroup_reserved: used to return the reserved size in qgroup
5783  *
5784  * This function is used to reserve the space for snapshot/subvolume
5785  * creation and deletion.  Those operations differ from common
5786  * file/directory operations: they modify two fs/file trees and
5787  * the root tree, and the number of items the qgroup reserves
5788  * differs from the free space reservation.  So we can not use
5789  * the space reservation mechanism in start_transaction().
5790  */
5791 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5792                                      struct btrfs_block_rsv *rsv,
5793                                      int items,
5794                                      u64 *qgroup_reserved,
5795                                      bool use_global_rsv)
5796 {
5797         u64 num_bytes;
5798         int ret;
5799         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5800
5801         if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
5802                 /* One for parent inode, two for dir entries */
5803                 num_bytes = 3 * root->nodesize;
5804                 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5805                 if (ret)
5806                         return ret;
5807         } else {
5808                 num_bytes = 0;
5809         }
5810
5811         *qgroup_reserved = num_bytes;
5812
5813         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5814         rsv->space_info = __find_space_info(root->fs_info,
5815                                             BTRFS_BLOCK_GROUP_METADATA);
5816         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5817                                   BTRFS_RESERVE_FLUSH_ALL);
5818
5819         if (ret == -ENOSPC && use_global_rsv)
5820                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
5821
5822         if (ret && *qgroup_reserved)
5823                 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5824
5825         return ret;
5826 }
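
/*
 * Worked example (16K nodesize assumed): with quotas enabled the qgroup
 * pre-reservation above is 3 * 16K == 48K (one item for the parent inode,
 * two for the dir entries), returned via *qgroup_reserved and freed again
 * if the block rsv reservation ultimately fails.
 */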
5827
5828 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5829                                       struct btrfs_block_rsv *rsv,
5830                                       u64 qgroup_reserved)
5831 {
5832         btrfs_block_rsv_release(root, rsv, (u64)-1);
5833 }
5834
5835 /**
5836  * drop_outstanding_extent - drop an outstanding extent
5837  * @inode: the inode we're dropping the extent for
5838  * @num_bytes: the number of bytes we're releasing.
5839  *
5840  * This is called when we are freeing up an outstanding extent, either
5841  * after an error or after an extent is written.  This will return the number of
5842  * reserved extents that need to be freed.  This must be called with
5843  * BTRFS_I(inode)->lock held.
5844  */
5845 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5846 {
5847         unsigned drop_inode_space = 0;
5848         unsigned dropped_extents = 0;
5849         unsigned num_extents = 0;
5850
5851         num_extents = (unsigned)div64_u64(num_bytes +
5852                                           BTRFS_MAX_EXTENT_SIZE - 1,
5853                                           BTRFS_MAX_EXTENT_SIZE);
5854         ASSERT(num_extents);
5855         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5856         BTRFS_I(inode)->outstanding_extents -= num_extents;
5857
5858         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5859             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5860                                &BTRFS_I(inode)->runtime_flags))
5861                 drop_inode_space = 1;
5862
5863         /*
5864          * If we have at least as many outstanding extents as we have
5865          * reserved extents then we need to leave the reserved count alone.
5866          */
5867         if (BTRFS_I(inode)->outstanding_extents >=
5868             BTRFS_I(inode)->reserved_extents)
5869                 return drop_inode_space;
5870
5871         dropped_extents = BTRFS_I(inode)->reserved_extents -
5872                 BTRFS_I(inode)->outstanding_extents;
5873         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5874         return dropped_extents + drop_inode_space;
5875 }
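
/*
 * Worked example (assuming the usual 128M BTRFS_MAX_EXTENT_SIZE): freeing
 * num_bytes == 256M + 1 gives num_extents == DIV_ROUND_UP(256M + 1, 128M)
 * == 3, so three outstanding extents are dropped even though the range only
 * barely spills into a third extent.
 */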
5876
5877 /**
5878  * calc_csum_metadata_size - return the amount of metadata space that must be
5879  *      reserved/freed for the given bytes.
5880  * @inode: the inode we're manipulating
5881  * @num_bytes: the number of bytes in question
5882  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5883  *
5884  * This adjusts the number of csum_bytes in the inode and then returns the
5885  * correct amount of metadata that must either be reserved or freed.  We
5886  * calculate how many checksums we can fit into one leaf and then divide the
5887  * number of bytes that will need to be checksummed by this value to figure out
5888  * how many checksums will be required.  If we are adding bytes then the number
5889  * may go up and we will return the number of additional bytes that must be
5890  * reserved.  If it is going down we will return the number of bytes that must
5891  * be freed.
5892  *
5893  * This must be called with BTRFS_I(inode)->lock held.
5894  */
5895 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5896                                    int reserve)
5897 {
5898         struct btrfs_root *root = BTRFS_I(inode)->root;
5899         u64 old_csums, num_csums;
5900
5901         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5902             BTRFS_I(inode)->csum_bytes == 0)
5903                 return 0;
5904
5905         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5906         if (reserve)
5907                 BTRFS_I(inode)->csum_bytes += num_bytes;
5908         else
5909                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5910         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5911
5912         /* No change, no need to reserve more */
5913         if (old_csums == num_csums)
5914                 return 0;
5915
5916         if (reserve)
5917                 return btrfs_calc_trans_metadata_size(root,
5918                                                       num_csums - old_csums);
5919
5920         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5921 }
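
/*
 * Illustrative example (per-leaf csum capacity assumed): if one leaf can
 * hold csums for 1M of data, then growing csum_bytes from 1.5M to 2.5M moves
 * btrfs_csum_bytes_to_leaves() from 2 to 3 and we reserve
 * btrfs_calc_trans_metadata_size(root, 1); shrinking back frees the same
 * amount.
 */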
5922
5923 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5924 {
5925         struct btrfs_root *root = BTRFS_I(inode)->root;
5926         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5927         u64 to_reserve = 0;
5928         u64 csum_bytes;
5929         unsigned nr_extents = 0;
5930         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5931         int ret = 0;
5932         bool delalloc_lock = true;
5933         u64 to_free = 0;
5934         unsigned dropped;
5935         bool release_extra = false;
5936
5937         /*
	 * If we are a free space inode we need to not flush since we will be in
5938          * the middle of a transaction commit.  We also don't need the delalloc
5939          * mutex since we won't race with anybody.  We need this mostly to make
5940          * lockdep shut its filthy mouth.
5941          *
5942          * If we have a transaction open (can happen if we call truncate_block
5943          * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
5944          */
5945         if (btrfs_is_free_space_inode(inode)) {
5946                 flush = BTRFS_RESERVE_NO_FLUSH;
5947                 delalloc_lock = false;
5948         } else if (current->journal_info) {
5949                 flush = BTRFS_RESERVE_FLUSH_LIMIT;
5950         }
5951
5952         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5953             btrfs_transaction_in_commit(root->fs_info))
5954                 schedule_timeout(1);
5955
5956         if (delalloc_lock)
5957                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5958
5959         num_bytes = ALIGN(num_bytes, root->sectorsize);
5960
5961         spin_lock(&BTRFS_I(inode)->lock);
5962         nr_extents = (unsigned)div64_u64(num_bytes +
5963                                          BTRFS_MAX_EXTENT_SIZE - 1,
5964                                          BTRFS_MAX_EXTENT_SIZE);
5965         BTRFS_I(inode)->outstanding_extents += nr_extents;
5966
5967         nr_extents = 0;
5968         if (BTRFS_I(inode)->outstanding_extents >
5969             BTRFS_I(inode)->reserved_extents)
5970                 nr_extents += BTRFS_I(inode)->outstanding_extents -
5971                         BTRFS_I(inode)->reserved_extents;
5972
5973         /* We always want to reserve a slot for updating the inode. */
5974         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents + 1);
5975         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5976         csum_bytes = BTRFS_I(inode)->csum_bytes;
5977         spin_unlock(&BTRFS_I(inode)->lock);
5978
5979         if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
5980                 ret = btrfs_qgroup_reserve_meta(root,
5981                                 nr_extents * root->nodesize);
5982                 if (ret)
5983                         goto out_fail;
5984         }
5985
5986         ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
5987         if (unlikely(ret)) {
5988                 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5989                 goto out_fail;
5990         }
5991
5992         spin_lock(&BTRFS_I(inode)->lock);
5993         if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5994                              &BTRFS_I(inode)->runtime_flags)) {
5995                 to_reserve -= btrfs_calc_trans_metadata_size(root, 1);
5996                 release_extra = true;
5997         }
5998         BTRFS_I(inode)->reserved_extents += nr_extents;
5999         spin_unlock(&BTRFS_I(inode)->lock);
6000
6001         if (delalloc_lock)
6002                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
6003
6004         if (to_reserve)
6005                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
6006                                               btrfs_ino(inode), to_reserve, 1);
6007         if (release_extra)
6008                 btrfs_block_rsv_release(root, block_rsv,
6009                                         btrfs_calc_trans_metadata_size(root,
6010                                                                        1));
6011         return 0;
6012
6013 out_fail:
6014         spin_lock(&BTRFS_I(inode)->lock);
6015         dropped = drop_outstanding_extent(inode, num_bytes);
6016         /*
6017          * If the inode's csum_bytes is the same as the original
6018          * csum_bytes then we know we haven't raced with any free()ers
6019          * so we can just reduce our inode's csum_bytes and carry on.
6020          */
6021         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
6022                 calc_csum_metadata_size(inode, num_bytes, 0);
6023         } else {
6024                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
6025                 u64 bytes;
6026
6027                 /*
6028                  * This is tricky, but first we need to figure out how much was
6029                  * freed by any free()ers that ran during this
6030                  * reservation, so we reset ->csum_bytes to the csum_bytes
6031                  * before we dropped our lock, and then call the free for the
6032                  * number of bytes that were freed while we were trying our
6033                  * reservation.
6034                  */
6035                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
6036                 BTRFS_I(inode)->csum_bytes = csum_bytes;
6037                 to_free = calc_csum_metadata_size(inode, bytes, 0);
6038
6040                 /*
6041                  * Now we need to see how much we would have freed had we not
6042                  * been making this reservation and our ->csum_bytes were not
6043                  * artificially inflated.
6044                  */
6045                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
6046                 bytes = csum_bytes - orig_csum_bytes;
6047                 bytes = calc_csum_metadata_size(inode, bytes, 0);
6048
6049                 /*
6050                  * Now reset ->csum_bytes to what it should be.  If bytes is
6051                  * more than to_free then we would have freed more space had we
6052                  * not had an artificially high ->csum_bytes, so we need to free
6053                  * the remainder.  If bytes is the same or less then we don't
6054                  * need to do anything, the other free-ers did the correct
6055                  * thing.
6056                  */
6057                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
6058                 if (bytes > to_free)
6059                         to_free = bytes - to_free;
6060                 else
6061                         to_free = 0;
6062         }
6063         spin_unlock(&BTRFS_I(inode)->lock);
6064         if (dropped)
6065                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
6066
6067         if (to_free) {
6068                 btrfs_block_rsv_release(root, block_rsv, to_free);
6069                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
6070                                               btrfs_ino(inode), to_free, 0);
6071         }
6072         if (delalloc_lock)
6073                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
6074         return ret;
6075 }
6076
6077 /**
6078  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
6079  * @inode: the inode to release the reservation for
6080  * @num_bytes: the number of bytes we're releasing
6081  *
6082  * This will release the metadata reservation for an inode.  This can be called
6083  * once we complete IO for a given set of bytes to release their metadata
6084  * reservations.
6085  */
6086 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
6087 {
6088         struct btrfs_root *root = BTRFS_I(inode)->root;
6089         u64 to_free = 0;
6090         unsigned dropped;
6091
6092         num_bytes = ALIGN(num_bytes, root->sectorsize);
6093         spin_lock(&BTRFS_I(inode)->lock);
6094         dropped = drop_outstanding_extent(inode, num_bytes);
6095
6096         if (num_bytes)
6097                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
6098         spin_unlock(&BTRFS_I(inode)->lock);
6099         if (dropped > 0)
6100                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
6101
6102         if (btrfs_is_testing(root->fs_info))
6103                 return;
6104
6105         trace_btrfs_space_reservation(root->fs_info, "delalloc",
6106                                       btrfs_ino(inode), to_free, 0);
6107
6108         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
6109                                 to_free);
6110 }
6111
6112 /**
6113  * btrfs_delalloc_reserve_space - reserve data and metadata space for
6114  * delalloc
6115  * @inode: inode we're writing to
6116  * @start: start range we are writing to
6117  * @len: the length of the range we are writing to
6118  *
6119  * This will do the following things
6120  *
6121  * o reserve space in data space info for num bytes
6122  *   and reserve precious corresponding qgroup space
6123  *   (Done in check_data_free_space)
6124  *
6125  * o reserve space for metadata space, based on the number of outstanding
6126  *   extents and how many csums will be needed;
6127  *   also reserve metadata space in a per-root over-reserve method.
6128  * o add to the inode's ->delalloc_bytes
6129  * o add it to the fs_info's delalloc inodes list.
6130  *   (Above 3 all done in delalloc_reserve_metadata)
6131  *
6132  * Return 0 for success
6133  * Return <0 for error (-ENOSPC or -EDQUOT)
6134  */
6135 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
6136 {
6137         int ret;
6138
6139         ret = btrfs_check_data_free_space(inode, start, len);
6140         if (ret < 0)
6141                 return ret;
6142         ret = btrfs_delalloc_reserve_metadata(inode, len);
6143         if (ret < 0)
6144                 btrfs_free_reserved_data_space(inode, start, len);
6145         return ret;
6146 }
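
/*
 * Usage sketch (illustrative; pos/count are hypothetical caller state): a
 * write path that reserves a range and then abandons it pairs the helpers:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, pos, count);
 *	if (ret)
 *		return ret;
 *	... attempt the write ...
 *	if (the write failed)
 *		btrfs_delalloc_release_space(inode, pos, count);
 */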
6147
6148 /**
6149  * btrfs_delalloc_release_space - release data and metadata space for delalloc
6150  * @inode: inode we're releasing space for
6151  * @start: start position of the space already reserved
6152  * @len: the len of the space already reserved
6153  *
6154  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
6155  * called in the case that we don't need the metadata AND data reservations
6156  * anymore, e.g. if there is an error or we insert an inline extent.
6157  *
6158  * This function will release the metadata space that was not used and will
6159  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
6160  * list if there are no delalloc bytes left.
6161  * Also it will handle the qgroup reserved space.
6162  */
6163 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
6164 {
6165         btrfs_delalloc_release_metadata(inode, len);
6166         btrfs_free_reserved_data_space(inode, start, len);
6167 }
6168
6169 static int update_block_group(struct btrfs_trans_handle *trans,
6170                               struct btrfs_root *root, u64 bytenr,
6171                               u64 num_bytes, int alloc)
6172 {
6173         struct btrfs_block_group_cache *cache = NULL;
6174         struct btrfs_fs_info *info = root->fs_info;
6175         u64 total = num_bytes;
6176         u64 old_val;
6177         u64 byte_in_group;
6178         int factor;
6179
6180         /* block accounting for super block */
6181         spin_lock(&info->delalloc_root_lock);
6182         old_val = btrfs_super_bytes_used(info->super_copy);
6183         if (alloc)
6184                 old_val += num_bytes;
6185         else
6186                 old_val -= num_bytes;
6187         btrfs_set_super_bytes_used(info->super_copy, old_val);
6188         spin_unlock(&info->delalloc_root_lock);
6189
6190         while (total) {
6191                 cache = btrfs_lookup_block_group(info, bytenr);
6192                 if (!cache)
6193                         return -ENOENT;
6194                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
6195                                     BTRFS_BLOCK_GROUP_RAID1 |
6196                                     BTRFS_BLOCK_GROUP_RAID10))
6197                         factor = 2;
6198                 else
6199                         factor = 1;
6200                 /*
6201                  * If this block group has free space cache written out, we
6202                  * need to make sure to load it if we are removing space.  This
6203                  * is because we need the unpinning stage to actually add the
6204                  * space back to the block group, otherwise we will leak space.
6205                  */
6206                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
6207                         cache_block_group(cache, 1);
6208
6209                 byte_in_group = bytenr - cache->key.objectid;
6210                 WARN_ON(byte_in_group > cache->key.offset);
6211
6212                 spin_lock(&cache->space_info->lock);
6213                 spin_lock(&cache->lock);
6214
6215                 if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
6216                     cache->disk_cache_state < BTRFS_DC_CLEAR)
6217                         cache->disk_cache_state = BTRFS_DC_CLEAR;
6218
6219                 old_val = btrfs_block_group_used(&cache->item);
6220                 num_bytes = min(total, cache->key.offset - byte_in_group);
6221                 if (alloc) {
6222                         old_val += num_bytes;
6223                         btrfs_set_block_group_used(&cache->item, old_val);
6224                         cache->reserved -= num_bytes;
6225                         cache->space_info->bytes_reserved -= num_bytes;
6226                         cache->space_info->bytes_used += num_bytes;
6227                         cache->space_info->disk_used += num_bytes * factor;
6228                         spin_unlock(&cache->lock);
6229                         spin_unlock(&cache->space_info->lock);
6230                 } else {
6231                         old_val -= num_bytes;
6232                         btrfs_set_block_group_used(&cache->item, old_val);
6233                         cache->pinned += num_bytes;
6234                         cache->space_info->bytes_pinned += num_bytes;
6235                         cache->space_info->bytes_used -= num_bytes;
6236                         cache->space_info->disk_used -= num_bytes * factor;
6237                         spin_unlock(&cache->lock);
6238                         spin_unlock(&cache->space_info->lock);
6239
6240                         trace_btrfs_space_reservation(root->fs_info, "pinned",
6241                                                       cache->space_info->flags,
6242                                                       num_bytes, 1);
6243                         set_extent_dirty(info->pinned_extents,
6244                                          bytenr, bytenr + num_bytes - 1,
6245                                          GFP_NOFS | __GFP_NOFAIL);
6246                 }
6247
6248                 spin_lock(&trans->transaction->dirty_bgs_lock);
6249                 if (list_empty(&cache->dirty_list)) {
6250                         list_add_tail(&cache->dirty_list,
6251                                       &trans->transaction->dirty_bgs);
6252                         trans->transaction->num_dirty_bgs++;
6253                         btrfs_get_block_group(cache);
6254                 }
6255                 spin_unlock(&trans->transaction->dirty_bgs_lock);
6256
6257                 /*
6258                  * No longer have used bytes in this block group, queue it for
6259                  * deletion. We do this after adding the block group to the
6260                  * dirty list to avoid races between cleaner kthread and space
6261                  * cache writeout.
6262                  */
6263                 if (!alloc && old_val == 0) {
6264                         spin_lock(&info->unused_bgs_lock);
6265                         if (list_empty(&cache->bg_list)) {
6266                                 btrfs_get_block_group(cache);
6267                                 list_add_tail(&cache->bg_list,
6268                                               &info->unused_bgs);
6269                         }
6270                         spin_unlock(&info->unused_bgs_lock);
6271                 }
6272
6273                 btrfs_put_block_group(cache);
6274                 total -= num_bytes;
6275                 bytenr += num_bytes;
6276         }
6277         return 0;
6278 }
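
/*
 * Worked example (profile factors as computed above): allocating 1M from a
 * DUP, RAID1 or RAID10 block group bumps bytes_used by 1M but disk_used by
 * 2M, since factor == 2 accounts for both copies; single-copy profiles use
 * factor == 1.
 */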
6279
6280 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
6281 {
6282         struct btrfs_block_group_cache *cache;
6283         u64 bytenr;
6284
6285         spin_lock(&root->fs_info->block_group_cache_lock);
6286         bytenr = root->fs_info->first_logical_byte;
6287         spin_unlock(&root->fs_info->block_group_cache_lock);
6288
6289         if (bytenr < (u64)-1)
6290                 return bytenr;
6291
6292         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
6293         if (!cache)
6294                 return 0;
6295
6296         bytenr = cache->key.objectid;
6297         btrfs_put_block_group(cache);
6298
6299         return bytenr;
6300 }
6301
6302 static int pin_down_extent(struct btrfs_root *root,
6303                            struct btrfs_block_group_cache *cache,
6304                            u64 bytenr, u64 num_bytes, int reserved)
6305 {
6306         spin_lock(&cache->space_info->lock);
6307         spin_lock(&cache->lock);
6308         cache->pinned += num_bytes;
6309         cache->space_info->bytes_pinned += num_bytes;
6310         if (reserved) {
6311                 cache->reserved -= num_bytes;
6312                 cache->space_info->bytes_reserved -= num_bytes;
6313         }
6314         spin_unlock(&cache->lock);
6315         spin_unlock(&cache->space_info->lock);
6316
6317         trace_btrfs_space_reservation(root->fs_info, "pinned",
6318                                       cache->space_info->flags, num_bytes, 1);
6319         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
6320                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6321         return 0;
6322 }
6323
6324 /*
6325  * this function must be called within transaction
6326  */
6327 int btrfs_pin_extent(struct btrfs_root *root,
6328                      u64 bytenr, u64 num_bytes, int reserved)
6329 {
6330         struct btrfs_block_group_cache *cache;
6331
6332         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6333         BUG_ON(!cache); /* Logic error */
6334
6335         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6336
6337         btrfs_put_block_group(cache);
6338         return 0;
6339 }
6340
6341 /*
6342  * this function must be called within transaction
6343  */
6344 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6345                                     u64 bytenr, u64 num_bytes)
6346 {
6347         struct btrfs_block_group_cache *cache;
6348         int ret;
6349
6350         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6351         if (!cache)
6352                 return -EINVAL;
6353
6354         /*
6355          * pull in the free space cache (if any) so that our pin
6356          * removes the free space from the cache.  We have load_only set
6357          * to one because the slow code to read in the free extents does check
6358          * the pinned extents.
6359          */
6360         cache_block_group(cache, 1);
6361
6362         pin_down_extent(root, cache, bytenr, num_bytes, 0);
6363
6364         /* remove us from the free space cache (if we're there at all) */
6365         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6366         btrfs_put_block_group(cache);
6367         return ret;
6368 }
6369
6370 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6371 {
6372         int ret;
6373         struct btrfs_block_group_cache *block_group;
6374         struct btrfs_caching_control *caching_ctl;
6375
6376         block_group = btrfs_lookup_block_group(root->fs_info, start);
6377         if (!block_group)
6378                 return -EINVAL;
6379
6380         cache_block_group(block_group, 0);
6381         caching_ctl = get_caching_control(block_group);
6382
6383         if (!caching_ctl) {
6384                 /* Logic error */
6385                 BUG_ON(!block_group_cache_done(block_group));
6386                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6387         } else {
6388                 mutex_lock(&caching_ctl->mutex);
6389
6390                 if (start >= caching_ctl->progress) {
6391                         ret = add_excluded_extent(root, start, num_bytes);
6392                 } else if (start + num_bytes <= caching_ctl->progress) {
6393                         ret = btrfs_remove_free_space(block_group,
6394                                                       start, num_bytes);
6395                 } else {
6396                         num_bytes = caching_ctl->progress - start;
6397                         ret = btrfs_remove_free_space(block_group,
6398                                                       start, num_bytes);
6399                         if (ret)
6400                                 goto out_lock;
6401
6402                         num_bytes = (start + num_bytes) -
6403                                 caching_ctl->progress;
6404                         start = caching_ctl->progress;
6405                         ret = add_excluded_extent(root, start, num_bytes);
6406                 }
6407 out_lock:
6408                 mutex_unlock(&caching_ctl->mutex);
6409                 put_caching_control(caching_ctl);
6410         }
6411         btrfs_put_block_group(block_group);
6412         return ret;
6413 }
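
/*
 * Worked example (offsets illustrative): with caching progress at 16M, a
 * logged extent covering 12M-20M takes the third branch above: 12M-16M is
 * removed from the free space cache directly, while 16M-20M is recorded as
 * an excluded extent for the caching thread to skip.
 */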
6414
6415 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6416                                  struct extent_buffer *eb)
6417 {
6418         struct btrfs_file_extent_item *item;
6419         struct btrfs_key key;
6420         int found_type;
6421         int i;
6422
6423         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6424                 return 0;
6425
6426         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6427                 btrfs_item_key_to_cpu(eb, &key, i);
6428                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6429                         continue;
6430                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6431                 found_type = btrfs_file_extent_type(eb, item);
6432                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6433                         continue;
6434                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6435                         continue;
6436                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6437                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6438                 __exclude_logged_extent(log, key.objectid, key.offset);
6439         }
6440
6441         return 0;
6442 }
6443
6444 static void
6445 btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
6446 {
6447         atomic_inc(&bg->reservations);
6448 }
6449
6450 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
6451                                         const u64 start)
6452 {
6453         struct btrfs_block_group_cache *bg;
6454
6455         bg = btrfs_lookup_block_group(fs_info, start);
6456         ASSERT(bg);
6457         if (atomic_dec_and_test(&bg->reservations))
6458                 wake_up_atomic_t(&bg->reservations);
6459         btrfs_put_block_group(bg);
6460 }
6461
6462 static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a)
6463 {
6464         schedule();
6465         return 0;
6466 }
6467
6468 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6469 {
6470         struct btrfs_space_info *space_info = bg->space_info;
6471
6472         ASSERT(bg->ro);
6473
6474         if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
6475                 return;
6476
6477         /*
6478          * Our block group is read only but before we set it to read only,
6479          * some task might have allocated an extent from it already, but it
6480          * has not yet created a respective ordered extent (and added it to a
6481          * root's list of ordered extents).
6482          * Therefore wait for any task currently allocating extents, since the
6483          * block group's reservations counter is incremented while a read lock
6484          * on the groups' semaphore is held and decremented after releasing
6485          * the read access on that semaphore and creating the ordered extent.
6486          */
6487         down_write(&space_info->groups_sem);
6488         up_write(&space_info->groups_sem);
6489
6490         wait_on_atomic_t(&bg->reservations,
6491                          btrfs_wait_bg_reservations_atomic_t,
6492                          TASK_UNINTERRUPTIBLE);
6493 }
6494
6495 /**
6496  * btrfs_add_reserved_bytes - update the block_group and space info counters
6497  * @cache:      The cache we are manipulating
6498  * @ram_bytes:  The number of bytes of file content; this will be the same
6499  *              as @num_bytes except on the compression path.
6500  * @num_bytes:  The number of bytes in question
6501  * @delalloc:   The blocks are allocated for the delalloc write
6502  *
6503  * This is called by the allocator when it reserves space.  Metadata
6504  * reservations come through here as well so we do the proper ENOSPC
6505  * accounting.  For data we handle the reservation through clearing the
6506  * delalloc bits in the io_tree.  We have to do this since we could end up
6507  * allocating less disk space for the amount of data we have reserved in the
6508  * case of compression.
6509  *
6510  * If this is a reservation and the block group has become read only we cannot
6511  * make the reservation and return -EAGAIN, otherwise this function always
6512  * succeeds.
6513  */
6514 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
6515                                     u64 ram_bytes, u64 num_bytes, int delalloc)
6516 {
6517         struct btrfs_space_info *space_info = cache->space_info;
6518         int ret = 0;
6519
6520         spin_lock(&space_info->lock);
6521         spin_lock(&cache->lock);
6522         if (cache->ro) {
6523                 ret = -EAGAIN;
6524         } else {
6525                 cache->reserved += num_bytes;
6526                 space_info->bytes_reserved += num_bytes;
6527
6528                 trace_btrfs_space_reservation(cache->fs_info,
6529                                 "space_info", space_info->flags,
6530                                 ram_bytes, 0);
6531                 space_info->bytes_may_use -= ram_bytes;
6532                 if (delalloc)
6533                         cache->delalloc_bytes += num_bytes;
6534         }
6535         spin_unlock(&cache->lock);
6536         spin_unlock(&space_info->lock);
6537         return ret;
6538 }
6539
6540 /**
6541  * btrfs_free_reserved_bytes - update the block_group and space info counters
6542  * @cache:      The cache we are manipulating
6543  * @num_bytes:  The number of bytes in question
6544  * @delalloc:   The blocks are allocated for the delalloc write
6545  *
6546  * This is called by somebody who is freeing space that was never actually used
6547  * on disk.  For example if you reserve some space for a new leaf in transaction
6548  * A and before transaction A commits you free that leaf, you call this
6549  * in order to clear the reservation.
6550  */
6552 static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
6553                                      u64 num_bytes, int delalloc)
6554 {
6555         struct btrfs_space_info *space_info = cache->space_info;
6556         int ret = 0;
6557
6558         spin_lock(&space_info->lock);
6559         spin_lock(&cache->lock);
6560         if (cache->ro)
6561                 space_info->bytes_readonly += num_bytes;
6562         cache->reserved -= num_bytes;
6563         space_info->bytes_reserved -= num_bytes;
6564
6565         if (delalloc)
6566                 cache->delalloc_bytes -= num_bytes;
6567         spin_unlock(&cache->lock);
6568         spin_unlock(&space_info->lock);
6569         return ret;
6570 }

6571 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6572                                 struct btrfs_root *root)
6573 {
6574         struct btrfs_fs_info *fs_info = root->fs_info;
6575         struct btrfs_caching_control *next;
6576         struct btrfs_caching_control *caching_ctl;
6577         struct btrfs_block_group_cache *cache;
6578
6579         down_write(&fs_info->commit_root_sem);
6580
6581         list_for_each_entry_safe(caching_ctl, next,
6582                                  &fs_info->caching_block_groups, list) {
6583                 cache = caching_ctl->block_group;
6584                 if (block_group_cache_done(cache)) {
6585                         cache->last_byte_to_unpin = (u64)-1;
6586                         list_del_init(&caching_ctl->list);
6587                         put_caching_control(caching_ctl);
6588                 } else {
6589                         cache->last_byte_to_unpin = caching_ctl->progress;
6590                 }
6591         }
6592
6593         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6594                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6595         else
6596                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6597
6598         up_write(&fs_info->commit_root_sem);
6599
6600         update_global_block_rsv(fs_info);
6601 }
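
/*
 * Note: the swap above makes pinned_extents ping-pong between the two
 * freed_extents trees.  Extents pinned while the next transaction runs
 * go into one tree, while btrfs_finish_extent_commit() drains the
 * other, which holds the extents pinned by the transaction that is
 * committing now.
 */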
6602
6603 /*
6604  * Returns the free cluster for the given space info and sets empty_cluster to
6605  * what it should be based on the mount options.
6606  */
6607 static struct btrfs_free_cluster *
6608 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6609                    u64 *empty_cluster)
6610 {
6611         struct btrfs_free_cluster *ret = NULL;
6612         bool ssd = btrfs_test_opt(root->fs_info, SSD);
6613
6614         *empty_cluster = 0;
6615         if (btrfs_mixed_space_info(space_info))
6616                 return ret;
6617
6618         if (ssd)
6619                 *empty_cluster = SZ_2M;
6620         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6621                 ret = &root->fs_info->meta_alloc_cluster;
6622                 if (!ssd)
6623                         *empty_cluster = SZ_64K;
6624         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6625                 ret = &root->fs_info->data_alloc_cluster;
6626         }
6627
6628         return ret;
6629 }
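
/*
 * For example, per the checks above: a metadata space_info on a
 * non-ssd mount gets the meta_alloc_cluster with a 64K empty cluster,
 * while a data space_info on an ssd mount gets the data_alloc_cluster
 * with a 2M empty cluster.
 */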
6630
6631 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6632                               const bool return_free_space)
6633 {
6634         struct btrfs_fs_info *fs_info = root->fs_info;
6635         struct btrfs_block_group_cache *cache = NULL;
6636         struct btrfs_space_info *space_info;
6637         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6638         struct btrfs_free_cluster *cluster = NULL;
6639         u64 len;
6640         u64 total_unpinned = 0;
6641         u64 empty_cluster = 0;
6642         bool readonly;
6643
6644         while (start <= end) {
6645                 readonly = false;
6646                 if (!cache ||
6647                     start >= cache->key.objectid + cache->key.offset) {
6648                         if (cache)
6649                                 btrfs_put_block_group(cache);
6650                         total_unpinned = 0;
6651                         cache = btrfs_lookup_block_group(fs_info, start);
6652                         BUG_ON(!cache); /* Logic error */
6653
6654                         cluster = fetch_cluster_info(root,
6655                                                      cache->space_info,
6656                                                      &empty_cluster);
6657                         empty_cluster <<= 1;
6658                 }
6659
6660                 len = cache->key.objectid + cache->key.offset - start;
6661                 len = min(len, end + 1 - start);
6662
6663                 if (start < cache->last_byte_to_unpin) {
6664                         len = min(len, cache->last_byte_to_unpin - start);
6665                         if (return_free_space)
6666                                 btrfs_add_free_space(cache, start, len);
6667                 }
6668
6669                 start += len;
6670                 total_unpinned += len;
6671                 space_info = cache->space_info;
6672
6673                 /*
6674                  * If this space cluster has been marked as fragmented and we've
6675                  * unpinned enough in this block group to potentially allow a
6676                  * cluster to be created inside of it, go ahead and clear the
6677                  * fragmented flag.
6678                  */
6679                 if (cluster && cluster->fragmented &&
6680                     total_unpinned > empty_cluster) {
6681                         spin_lock(&cluster->lock);
6682                         cluster->fragmented = 0;
6683                         spin_unlock(&cluster->lock);
6684                 }
6685
6686                 spin_lock(&space_info->lock);
6687                 spin_lock(&cache->lock);
6688                 cache->pinned -= len;
6689                 space_info->bytes_pinned -= len;
6690
6691                 trace_btrfs_space_reservation(fs_info, "pinned",
6692                                               space_info->flags, len, 0);
6693                 space_info->max_extent_size = 0;
6694                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6695                 if (cache->ro) {
6696                         space_info->bytes_readonly += len;
6697                         readonly = true;
6698                 }
6699                 spin_unlock(&cache->lock);
6700                 if (!readonly && return_free_space &&
6701                     global_rsv->space_info == space_info) {
6702                         u64 to_add = len;
6704                         spin_lock(&global_rsv->lock);
6705                         if (!global_rsv->full) {
6706                                 to_add = min(len, global_rsv->size -
6707                                              global_rsv->reserved);
6708                                 global_rsv->reserved += to_add;
6709                                 space_info->bytes_may_use += to_add;
6710                                 if (global_rsv->reserved >= global_rsv->size)
6711                                         global_rsv->full = 1;
6712                                 trace_btrfs_space_reservation(fs_info,
6713                                                               "space_info",
6714                                                               space_info->flags,
6715                                                               to_add, 1);
6716                                 len -= to_add;
6717                         }
6718                         spin_unlock(&global_rsv->lock);
6719                         /* Add to any tickets we may have */
6720                         if (len)
6721                                 space_info_add_new_bytes(fs_info, space_info,
6722                                                          len);
6723                 }
6724                 spin_unlock(&space_info->lock);
6725         }
6726
6727         if (cache)
6728                 btrfs_put_block_group(cache);
6729         return 0;
6730 }
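
/*
 * Note the refill order above: unpinned space first tops up the global
 * block reserve, and only whatever is left over is handed to waiting
 * reservation tickets via space_info_add_new_bytes().
 */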
6731
6732 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6733                                struct btrfs_root *root)
6734 {
6735         struct btrfs_fs_info *fs_info = root->fs_info;
6736         struct btrfs_block_group_cache *block_group, *tmp;
6737         struct list_head *deleted_bgs;
6738         struct extent_io_tree *unpin;
6739         u64 start;
6740         u64 end;
6741         int ret;
6742
6743         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6744                 unpin = &fs_info->freed_extents[1];
6745         else
6746                 unpin = &fs_info->freed_extents[0];
6747
6748         while (!trans->aborted) {
6749                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6750                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6751                                             EXTENT_DIRTY, NULL);
6752                 if (ret) {
6753                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6754                         break;
6755                 }
6756
6757                 if (btrfs_test_opt(root->fs_info, DISCARD))
6758                         ret = btrfs_discard_extent(root, start,
6759                                                    end + 1 - start, NULL);
6760
6761                 clear_extent_dirty(unpin, start, end);
6762                 unpin_extent_range(root, start, end, true);
6763                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6764                 cond_resched();
6765         }
6766
6767         /*
6768          * Transaction is finished.  We don't need the lock anymore.  We
6769          * do need to clean up the block groups in case of a transaction
6770          * abort.
6771          */
6772         deleted_bgs = &trans->transaction->deleted_bgs;
6773         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6774                 u64 trimmed = 0;
6775
6776                 ret = -EROFS;
6777                 if (!trans->aborted)
6778                         ret = btrfs_discard_extent(root,
6779                                                    block_group->key.objectid,
6780                                                    block_group->key.offset,
6781                                                    &trimmed);
6782
6783                 list_del_init(&block_group->bg_list);
6784                 btrfs_put_block_group_trimming(block_group);
6785                 btrfs_put_block_group(block_group);
6786
6787                 if (ret) {
6788                         const char *errstr = btrfs_decode_error(ret);
6789                         btrfs_warn(fs_info,
6790                                    "discard failed while removing block group: errno=%d %s",
6791                                    ret, errstr);
6792                 }
6793         }
6794
6795         return 0;
6796 }
6797
6798 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6799                              u64 owner, u64 root_objectid)
6800 {
6801         struct btrfs_space_info *space_info;
6802         u64 flags;
6803
6804         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6805                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6806                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6807                 else
6808                         flags = BTRFS_BLOCK_GROUP_METADATA;
6809         } else {
6810                 flags = BTRFS_BLOCK_GROUP_DATA;
6811         }
6812
6813         space_info = __find_space_info(fs_info, flags);
6814         BUG_ON(!space_info); /* Logic bug */
6815         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6816 }
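
/*
 * Owners below BTRFS_FIRST_FREE_OBJECTID are tree block levels, so
 * those extents are counted against the metadata (or system, for the
 * chunk tree) space_info; file extents pass an inode number as the
 * owner, which is always >= BTRFS_FIRST_FREE_OBJECTID, and land in the
 * data space_info.
 */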
6817
6819 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6820                                 struct btrfs_root *root,
6821                                 struct btrfs_delayed_ref_node *node, u64 parent,
6822                                 u64 root_objectid, u64 owner_objectid,
6823                                 u64 owner_offset, int refs_to_drop,
6824                                 struct btrfs_delayed_extent_op *extent_op)
6825 {
6826         struct btrfs_key key;
6827         struct btrfs_path *path;
6828         struct btrfs_fs_info *info = root->fs_info;
6829         struct btrfs_root *extent_root = info->extent_root;
6830         struct extent_buffer *leaf;
6831         struct btrfs_extent_item *ei;
6832         struct btrfs_extent_inline_ref *iref;
6833         int ret;
6834         int is_data;
6835         int extent_slot = 0;
6836         int found_extent = 0;
6837         int num_to_del = 1;
6838         u32 item_size;
6839         u64 refs;
6840         u64 bytenr = node->bytenr;
6841         u64 num_bytes = node->num_bytes;
6842         int last_ref = 0;
6843         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6844                                                  SKINNY_METADATA);
6845
6846         path = btrfs_alloc_path();
6847         if (!path)
6848                 return -ENOMEM;
6849
6850         path->reada = READA_FORWARD;
6851         path->leave_spinning = 1;
6852
6853         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6854         BUG_ON(!is_data && refs_to_drop != 1);
6855
6856         if (is_data)
6857                 skinny_metadata = 0;
6858
6859         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6860                                     bytenr, num_bytes, parent,
6861                                     root_objectid, owner_objectid,
6862                                     owner_offset);
6863         if (ret == 0) {
6864                 extent_slot = path->slots[0];
6865                 while (extent_slot >= 0) {
6866                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6867                                               extent_slot);
6868                         if (key.objectid != bytenr)
6869                                 break;
6870                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6871                             key.offset == num_bytes) {
6872                                 found_extent = 1;
6873                                 break;
6874                         }
6875                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6876                             key.offset == owner_objectid) {
6877                                 found_extent = 1;
6878                                 break;
6879                         }
6880                         if (path->slots[0] - extent_slot > 5)
6881                                 break;
6882                         extent_slot--;
6883                 }
6884 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6885                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6886                 if (found_extent && item_size < sizeof(*ei))
6887                         found_extent = 0;
6888 #endif
6889                 if (!found_extent) {
6890                         BUG_ON(iref);
6891                         ret = remove_extent_backref(trans, extent_root, path,
6892                                                     NULL, refs_to_drop,
6893                                                     is_data, &last_ref);
6894                         if (ret) {
6895                                 btrfs_abort_transaction(trans, ret);
6896                                 goto out;
6897                         }
6898                         btrfs_release_path(path);
6899                         path->leave_spinning = 1;
6900
6901                         key.objectid = bytenr;
6902                         key.type = BTRFS_EXTENT_ITEM_KEY;
6903                         key.offset = num_bytes;
6904
6905                         if (!is_data && skinny_metadata) {
6906                                 key.type = BTRFS_METADATA_ITEM_KEY;
6907                                 key.offset = owner_objectid;
6908                         }
6909
6910                         ret = btrfs_search_slot(trans, extent_root,
6911                                                 &key, path, -1, 1);
6912                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6913                                 /*
6914                                  * Couldn't find our skinny metadata item,
6915                                  * see if we have ye olde extent item.
6916                                  */
6917                                 path->slots[0]--;
6918                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6919                                                       path->slots[0]);
6920                                 if (key.objectid == bytenr &&
6921                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6922                                     key.offset == num_bytes)
6923                                         ret = 0;
6924                         }
6925
6926                         if (ret > 0 && skinny_metadata) {
6927                                 skinny_metadata = false;
6928                                 key.objectid = bytenr;
6929                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6930                                 key.offset = num_bytes;
6931                                 btrfs_release_path(path);
6932                                 ret = btrfs_search_slot(trans, extent_root,
6933                                                         &key, path, -1, 1);
6934                         }
6935
6936                         if (ret) {
6937                                 btrfs_err(info,
6938                                           "umm, got %d back from search, was looking for %llu",
6939                                           ret, bytenr);
6940                                 if (ret > 0)
6941                                         btrfs_print_leaf(extent_root,
6942                                                          path->nodes[0]);
6943                         }
6944                         if (ret < 0) {
6945                                 btrfs_abort_transaction(trans, ret);
6946                                 goto out;
6947                         }
6948                         extent_slot = path->slots[0];
6949                 }
6950         } else if (WARN_ON(ret == -ENOENT)) {
6951                 btrfs_print_leaf(extent_root, path->nodes[0]);
6952                 btrfs_err(info,
6953                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6954                         bytenr, parent, root_objectid, owner_objectid,
6955                         owner_offset);
6956                 btrfs_abort_transaction(trans, ret);
6957                 goto out;
6958         } else {
6959                 btrfs_abort_transaction(trans, ret);
6960                 goto out;
6961         }
6962
6963         leaf = path->nodes[0];
6964         item_size = btrfs_item_size_nr(leaf, extent_slot);
6965 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6966         if (item_size < sizeof(*ei)) {
6967                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6968                 ret = convert_extent_item_v0(trans, extent_root, path,
6969                                              owner_objectid, 0);
6970                 if (ret < 0) {
6971                         btrfs_abort_transaction(trans, ret);
6972                         goto out;
6973                 }
6974
6975                 btrfs_release_path(path);
6976                 path->leave_spinning = 1;
6977
6978                 key.objectid = bytenr;
6979                 key.type = BTRFS_EXTENT_ITEM_KEY;
6980                 key.offset = num_bytes;
6981
6982                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6983                                         -1, 1);
6984                 if (ret) {
6985                         btrfs_err(info,
6986                                   "umm, got %d back from search, was looking for %llu",
6987                                 ret, bytenr);
6988                         btrfs_print_leaf(extent_root, path->nodes[0]);
6989                 }
6990                 if (ret < 0) {
6991                         btrfs_abort_transaction(trans, ret);
6992                         goto out;
6993                 }
6994
6995                 extent_slot = path->slots[0];
6996                 leaf = path->nodes[0];
6997                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6998         }
6999 #endif
7000         BUG_ON(item_size < sizeof(*ei));
7001         ei = btrfs_item_ptr(leaf, extent_slot,
7002                             struct btrfs_extent_item);
7003         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
7004             key.type == BTRFS_EXTENT_ITEM_KEY) {
7005                 struct btrfs_tree_block_info *bi;
7006                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
7007                 bi = (struct btrfs_tree_block_info *)(ei + 1);
7008                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
7009         }
7010
7011         refs = btrfs_extent_refs(leaf, ei);
7012         if (refs < refs_to_drop) {
7013                 btrfs_err(info,
7014                           "trying to drop %d refs but we only have %llu for bytenr %llu",
7015                           refs_to_drop, refs, bytenr);
7016                 ret = -EINVAL;
7017                 btrfs_abort_transaction(trans, ret);
7018                 goto out;
7019         }
7020         refs -= refs_to_drop;
7021
7022         if (refs > 0) {
7023                 if (extent_op)
7024                         __run_delayed_extent_op(extent_op, leaf, ei);
7025                 /*
7026                  * In the case of an inline back ref, the reference count
7027                  * will be updated by remove_extent_backref()
7028                  */
7029                 if (iref) {
7030                         BUG_ON(!found_extent);
7031                 } else {
7032                         btrfs_set_extent_refs(leaf, ei, refs);
7033                         btrfs_mark_buffer_dirty(leaf);
7034                 }
7035                 if (found_extent) {
7036                         ret = remove_extent_backref(trans, extent_root, path,
7037                                                     iref, refs_to_drop,
7038                                                     is_data, &last_ref);
7039                         if (ret) {
7040                                 btrfs_abort_transaction(trans, ret);
7041                                 goto out;
7042                         }
7043                 }
7044                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
7045                                  root_objectid);
7046         } else {
7047                 if (found_extent) {
7048                         BUG_ON(is_data && refs_to_drop !=
7049                                extent_data_ref_count(path, iref));
7050                         if (iref) {
7051                                 BUG_ON(path->slots[0] != extent_slot);
7052                         } else {
7053                                 BUG_ON(path->slots[0] != extent_slot + 1);
7054                                 path->slots[0] = extent_slot;
7055                                 num_to_del = 2;
7056                         }
7057                 }
7058
7059                 last_ref = 1;
7060                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
7061                                       num_to_del);
7062                 if (ret) {
7063                         btrfs_abort_transaction(trans, ret);
7064                         goto out;
7065                 }
7066                 btrfs_release_path(path);
7067
7068                 if (is_data) {
7069                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
7070                         if (ret) {
7071                                 btrfs_abort_transaction(trans, ret);
7072                                 goto out;
7073                         }
7074                 }
7075
7076                 ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
7077                                              num_bytes);
7078                 if (ret) {
7079                         btrfs_abort_transaction(trans, ret);
7080                         goto out;
7081                 }
7082
7083                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
7084                 if (ret) {
7085                         btrfs_abort_transaction(trans, ret);
7086                         goto out;
7087                 }
7088         }
7089         btrfs_release_path(path);
7090
7091 out:
7092         btrfs_free_path(path);
7093         return ret;
7094 }
7095
7096 /*
7097  * when we free a block, it is possible (and likely) that we free the last
7098  * delayed ref for that extent as well.  This searches the delayed ref tree for
7099  * a given extent, and if there are no other delayed refs to be processed, it
7100  * removes it from the tree.
7101  */
7102 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
7103                                       struct btrfs_root *root, u64 bytenr)
7104 {
7105         struct btrfs_delayed_ref_head *head;
7106         struct btrfs_delayed_ref_root *delayed_refs;
7107         int ret = 0;
7108
7109         delayed_refs = &trans->transaction->delayed_refs;
7110         spin_lock(&delayed_refs->lock);
7111         head = btrfs_find_delayed_ref_head(trans, bytenr);
7112         if (!head)
7113                 goto out_delayed_unlock;
7114
7115         spin_lock(&head->lock);
7116         if (!list_empty(&head->ref_list))
7117                 goto out;
7118
7119         if (head->extent_op) {
7120                 if (!head->must_insert_reserved)
7121                         goto out;
7122                 btrfs_free_delayed_extent_op(head->extent_op);
7123                 head->extent_op = NULL;
7124         }
7125
7126         /*
7127          * waiting for the lock here would deadlock.  If someone else has it
7128          * locked, they are already in the process of dropping it anyway
7129          */
7130         if (!mutex_trylock(&head->mutex))
7131                 goto out;
7132
7133         /*
7134          * at this point we have a head with no other entries.  Go
7135          * ahead and process it.
7136          */
7137         head->node.in_tree = 0;
7138         rb_erase(&head->href_node, &delayed_refs->href_root);
7139
7140         atomic_dec(&delayed_refs->num_entries);
7141
7142         /*
7143          * we don't take a ref on the node because we're removing it from the
7144          * tree, so we just steal the ref the tree was holding.
7145          */
7146         delayed_refs->num_heads--;
7147         if (head->processing == 0)
7148                 delayed_refs->num_heads_ready--;
7149         head->processing = 0;
7150         spin_unlock(&head->lock);
7151         spin_unlock(&delayed_refs->lock);
7152
7153         BUG_ON(head->extent_op);
7154         if (head->must_insert_reserved)
7155                 ret = 1;
7156
7157         mutex_unlock(&head->mutex);
7158         btrfs_put_delayed_ref(&head->node);
7159         return ret;
7160 out:
7161         spin_unlock(&head->lock);
7162
7163 out_delayed_unlock:
7164         spin_unlock(&delayed_refs->lock);
7165         return 0;
7166 }
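
/*
 * A nonzero return from check_ref_cleanup() means the head still had
 * must_insert_reserved set, i.e. the extent was never inserted into
 * the extent tree.  btrfs_free_tree_block() below relies on this to
 * reclaim such blocks directly, pinning them only if they were already
 * written out.
 */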
7167
7168 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
7169                            struct btrfs_root *root,
7170                            struct extent_buffer *buf,
7171                            u64 parent, int last_ref)
7172 {
7173         int pin = 1;
7174         int ret;
7175
7176         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7177                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7178                                         buf->start, buf->len,
7179                                         parent, root->root_key.objectid,
7180                                         btrfs_header_level(buf),
7181                                         BTRFS_DROP_DELAYED_REF, NULL);
7182                 BUG_ON(ret); /* -ENOMEM */
7183         }
7184
7185         if (!last_ref)
7186                 return;
7187
7188         if (btrfs_header_generation(buf) == trans->transid) {
7189                 struct btrfs_block_group_cache *cache;
7190
7191                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7192                         ret = check_ref_cleanup(trans, root, buf->start);
7193                         if (!ret)
7194                                 goto out;
7195                 }
7196
7197                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
7198
7199                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
7200                         pin_down_extent(root, cache, buf->start, buf->len, 1);
7201                         btrfs_put_block_group(cache);
7202                         goto out;
7203                 }
7204
7205                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
7206
7207                 btrfs_add_free_space(cache, buf->start, buf->len);
7208                 btrfs_free_reserved_bytes(cache, buf->len, 0);
7209                 btrfs_put_block_group(cache);
7210                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
7211                 pin = 0;
7212         }
7213 out:
7214         if (pin)
7215                 add_pinned_bytes(root->fs_info, buf->len,
7216                                  btrfs_header_level(buf),
7217                                  root->root_key.objectid);
7218
7219         /*
7220          * The buffer is being deleted; clear the corrupt flag since it no
7221          * longer matters.
7222          */
7223         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
7224 }
7225
7226 /* Can return -ENOMEM */
7227 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7228                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
7229                       u64 owner, u64 offset)
7230 {
7231         int ret;
7232         struct btrfs_fs_info *fs_info = root->fs_info;
7233
7234         if (btrfs_is_testing(fs_info))
7235                 return 0;
7236
7237         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
7238
7239         /*
7240          * tree log blocks never actually go into the extent allocation
7241          * tree, just update pinning info and exit early.
7242          */
7243         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
7244                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
7245                 /* unlocks the pinned mutex */
7246                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
7247                 ret = 0;
7248         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
7249                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
7250                                         num_bytes,
7251                                         parent, root_objectid, (int)owner,
7252                                         BTRFS_DROP_DELAYED_REF, NULL);
7253         } else {
7254                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
7255                                                 num_bytes,
7256                                                 parent, root_objectid, owner,
7257                                                 offset, 0,
7258                                                 BTRFS_DROP_DELAYED_REF, NULL);
7259         }
7260         return ret;
7261 }
7262
7263 /*
7264  * when we wait for progress in the block group caching, it's because
7265  * our allocation attempt failed at least once.  So, we must sleep
7266  * and let some progress happen before we try again.
7267  *
7268  * This function will sleep at least once waiting for new free space to
7269  * show up, and then it will check the block group free space numbers
7270  * for our min num_bytes.  Another option is to have it go ahead
7271  * and look in the rbtree for a free extent of a given size, but this
7272  * is a good start.
7273  *
7274  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
7275  * any of the information in this block group.
7276  */
7277 static noinline void
7278 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
7279                                 u64 num_bytes)
7280 {
7281         struct btrfs_caching_control *caching_ctl;
7282
7283         caching_ctl = get_caching_control(cache);
7284         if (!caching_ctl)
7285                 return;
7286
7287         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
7288                    (cache->free_space_ctl->free_space >= num_bytes));
7289
7290         put_caching_control(caching_ctl);
7291 }
7292
7293 static noinline int
7294 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
7295 {
7296         struct btrfs_caching_control *caching_ctl;
7297         int ret = 0;
7298
7299         caching_ctl = get_caching_control(cache);
7300         if (!caching_ctl)
7301                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
7302
7303         wait_event(caching_ctl->wait, block_group_cache_done(cache));
7304         if (cache->cached == BTRFS_CACHE_ERROR)
7305                 ret = -EIO;
7306         put_caching_control(caching_ctl);
7307         return ret;
7308 }
7309
7310 int __get_raid_index(u64 flags)
7311 {
7312         if (flags & BTRFS_BLOCK_GROUP_RAID10)
7313                 return BTRFS_RAID_RAID10;
7314         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
7315                 return BTRFS_RAID_RAID1;
7316         else if (flags & BTRFS_BLOCK_GROUP_DUP)
7317                 return BTRFS_RAID_DUP;
7318         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
7319                 return BTRFS_RAID_RAID0;
7320         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
7321                 return BTRFS_RAID_RAID5;
7322         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
7323                 return BTRFS_RAID_RAID6;
7324
7325         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
7326 }
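
/*
 * For example, __get_raid_index(BTRFS_BLOCK_GROUP_DATA |
 * BTRFS_BLOCK_GROUP_RAID1) returns BTRFS_RAID_RAID1: only the profile
 * bits are examined, the type bits (data/metadata/system) are ignored.
 */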
7327
7328 int get_block_group_index(struct btrfs_block_group_cache *cache)
7329 {
7330         return __get_raid_index(cache->flags);
7331 }
7332
7333 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
7334         [BTRFS_RAID_RAID10]     = "raid10",
7335         [BTRFS_RAID_RAID1]      = "raid1",
7336         [BTRFS_RAID_DUP]        = "dup",
7337         [BTRFS_RAID_RAID0]      = "raid0",
7338         [BTRFS_RAID_SINGLE]     = "single",
7339         [BTRFS_RAID_RAID5]      = "raid5",
7340         [BTRFS_RAID_RAID6]      = "raid6",
7341 };
7342
7343 static const char *get_raid_name(enum btrfs_raid_types type)
7344 {
7345         if (type >= BTRFS_NR_RAID_TYPES)
7346                 return NULL;
7347
7348         return btrfs_raid_type_names[type];
7349 }
7350
7351 enum btrfs_loop_type {
7352         LOOP_CACHING_NOWAIT = 0,
7353         LOOP_CACHING_WAIT = 1,
7354         LOOP_ALLOC_CHUNK = 2,
7355         LOOP_NO_EMPTY_SIZE = 3,
7356 };
7357
7358 static inline void
7359 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
7360                        int delalloc)
7361 {
7362         if (delalloc)
7363                 down_read(&cache->data_rwsem);
7364 }
7365
7366 static inline void
7367 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
7368                        int delalloc)
7369 {
7370         btrfs_get_block_group(cache);
7371         if (delalloc)
7372                 down_read(&cache->data_rwsem);
7373 }
7374
7375 static struct btrfs_block_group_cache *
7376 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7377                    struct btrfs_free_cluster *cluster,
7378                    int delalloc)
7379 {
7380         struct btrfs_block_group_cache *used_bg = NULL;
7381
7382         spin_lock(&cluster->refill_lock);
7383         while (1) {
7384                 used_bg = cluster->block_group;
7385                 if (!used_bg)
7386                         return NULL;
7387
7388                 if (used_bg == block_group)
7389                         return used_bg;
7390
7391                 btrfs_get_block_group(used_bg);
7392
7393                 if (!delalloc)
7394                         return used_bg;
7395
7396                 if (down_read_trylock(&used_bg->data_rwsem))
7397                         return used_bg;
7398
7399                 spin_unlock(&cluster->refill_lock);
7400
7401                 /* We should only have one level of nesting. */
7402                 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
7403
7404                 spin_lock(&cluster->refill_lock);
7405                 if (used_bg == cluster->block_group)
7406                         return used_bg;
7407
7408                 up_read(&used_bg->data_rwsem);
7409                 btrfs_put_block_group(used_bg);
7410         }
7411 }
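
/*
 * The loop above is needed because cluster->block_group may be
 * repointed while refill_lock is dropped to take data_rwsem; used_bg
 * is only returned once it is confirmed to still be the cluster's
 * block group after relocking.
 */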
7412
7413 static inline void
7414 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7415                          int delalloc)
7416 {
7417         if (delalloc)
7418                 up_read(&cache->data_rwsem);
7419         btrfs_put_block_group(cache);
7420 }
7421
7422 /*
7423  * walks the btree of allocated extents and finds a hole of a given size.
7424  * The key ins is changed to record the hole:
7425  * ins->objectid == start position
7426  * ins->flags == BTRFS_EXTENT_ITEM_KEY
7427  * ins->offset == the size of the hole.
7428  * Any available blocks before search_start are skipped.
7429  *
7430  * If there is no suitable free space, we record the maximum size of
7431  * the free space extents we found.
7432  */
7433 static noinline int find_free_extent(struct btrfs_root *orig_root,
7434                                 u64 ram_bytes, u64 num_bytes, u64 empty_size,
7435                                 u64 hint_byte, struct btrfs_key *ins,
7436                                 u64 flags, int delalloc)
7437 {
7438         int ret = 0;
7439         struct btrfs_root *root = orig_root->fs_info->extent_root;
7440         struct btrfs_free_cluster *last_ptr = NULL;
7441         struct btrfs_block_group_cache *block_group = NULL;
7442         u64 search_start = 0;
7443         u64 max_extent_size = 0;
7444         u64 empty_cluster = 0;
7445         struct btrfs_space_info *space_info;
7446         int loop = 0;
7447         int index = __get_raid_index(flags);
7448         bool failed_cluster_refill = false;
7449         bool failed_alloc = false;
7450         bool use_cluster = true;
7451         bool have_caching_bg = false;
7452         bool orig_have_caching_bg = false;
7453         bool full_search = false;
7454
7455         WARN_ON(num_bytes < root->sectorsize);
7456         ins->type = BTRFS_EXTENT_ITEM_KEY;
7457         ins->objectid = 0;
7458         ins->offset = 0;
7459
7460         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7461
7462         space_info = __find_space_info(root->fs_info, flags);
7463         if (!space_info) {
7464                 btrfs_err(root->fs_info, "no space info for %llu", flags);
7465                 return -ENOSPC;
7466         }
7467
7468         /*
7469          * If our free space is heavily fragmented we may not be able to make
7470          * big contiguous allocations, so instead of doing the expensive search
7471          * for free space, simply return ENOSPC with our max_extent_size so we
7472          * can go ahead and search for a more manageable chunk.
7473          *
7474          * If our max_extent_size is large enough for our allocation simply
7475          * disable clustering since we will likely not be able to find enough
7476          * space to create a cluster and induce latency trying.
7477          */
7478         if (unlikely(space_info->max_extent_size)) {
7479                 spin_lock(&space_info->lock);
7480                 if (space_info->max_extent_size &&
7481                     num_bytes > space_info->max_extent_size) {
7482                         ins->offset = space_info->max_extent_size;
7483                         spin_unlock(&space_info->lock);
7484                         return -ENOSPC;
7485                 } else if (space_info->max_extent_size) {
7486                         use_cluster = false;
7487                 }
7488                 spin_unlock(&space_info->lock);
7489         }
7490
7491         last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7492         if (last_ptr) {
7493                 spin_lock(&last_ptr->lock);
7494                 if (last_ptr->block_group)
7495                         hint_byte = last_ptr->window_start;
7496                 if (last_ptr->fragmented) {
7497                         /*
7498                          * We still set window_start so we can keep track of the
7499                          * last place we found an allocation to try and save
7500                          * some time.
7501                          */
7502                         hint_byte = last_ptr->window_start;
7503                         use_cluster = false;
7504                 }
7505                 spin_unlock(&last_ptr->lock);
7506         }
7507
7508         search_start = max(search_start, first_logical_byte(root, 0));
7509         search_start = max(search_start, hint_byte);
7510         if (search_start == hint_byte) {
7511                 block_group = btrfs_lookup_block_group(root->fs_info,
7512                                                        search_start);
7513                 /*
7514                  * we don't want to use the block group if it doesn't match our
7515                  * allocation bits, or if it's not cached.
7516                  *
7517                  * However if we are re-searching with an ideal block group
7518                  * picked out then we don't care that the block group is cached.
7519                  */
7520                 if (block_group && block_group_bits(block_group, flags) &&
7521                     block_group->cached != BTRFS_CACHE_NO) {
7522                         down_read(&space_info->groups_sem);
7523                         if (list_empty(&block_group->list) ||
7524                             block_group->ro) {
7525                                 /*
7526                                  * someone is removing this block group,
7527                                  * we can't jump into the have_block_group
7528                                  * target because our list pointers are not
7529                                  * valid
7530                                  */
7531                                 btrfs_put_block_group(block_group);
7532                                 up_read(&space_info->groups_sem);
7533                         } else {
7534                                 index = get_block_group_index(block_group);
7535                                 btrfs_lock_block_group(block_group, delalloc);
7536                                 goto have_block_group;
7537                         }
7538                 } else if (block_group) {
7539                         btrfs_put_block_group(block_group);
7540                 }
7541         }
7542 search:
7543         have_caching_bg = false;
7544         if (index == 0 || index == __get_raid_index(flags))
7545                 full_search = true;
7546         down_read(&space_info->groups_sem);
7547         list_for_each_entry(block_group, &space_info->block_groups[index],
7548                             list) {
7549                 u64 offset;
7550                 int cached;
7551
7552                 btrfs_grab_block_group(block_group, delalloc);
7553                 search_start = block_group->key.objectid;
7554
7555                 /*
7556                  * this can happen if we end up cycling through all the
7557                  * raid types, but we want to make sure we only allocate
7558                  * for the proper type.
7559                  */
7560                 if (!block_group_bits(block_group, flags)) {
7561                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
7562                                     BTRFS_BLOCK_GROUP_RAID1 |
7563                                     BTRFS_BLOCK_GROUP_RAID5 |
7564                                     BTRFS_BLOCK_GROUP_RAID6 |
7565                                     BTRFS_BLOCK_GROUP_RAID10;
7566
7567                         /*
7568                          * if they asked for extra copies and this block group
7569                          * doesn't provide them, bail.  This does allow us to
7570                          * fill raid0 from raid1.
7571                          */
7572                         if ((flags & extra) && !(block_group->flags & extra))
7573                                 goto loop;
7574
7575                         /*
7576                          * This block group has different flags than we want.
7577                          * It's possible that we have MIXED_GROUP flag but no
7578                          * block group is mixed.  Just skip such block group.
7579                          */
7580                         btrfs_release_block_group(block_group, delalloc);
7581                         continue;
7582                 }
7583
7584 have_block_group:
7585                 cached = block_group_cache_done(block_group);
7586                 if (unlikely(!cached)) {
7587                         have_caching_bg = true;
7588                         ret = cache_block_group(block_group, 0);
7589                         BUG_ON(ret < 0);
7590                         ret = 0;
7591                 }
7592
7593                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7594                         goto loop;
7595                 if (unlikely(block_group->ro))
7596                         goto loop;
7597
7598                 /*
7599                  * Ok we want to try and use the cluster allocator, so
7600                  * lets look there
7601                  */
7602                 if (last_ptr && use_cluster) {
7603                         struct btrfs_block_group_cache *used_block_group;
7604                         unsigned long aligned_cluster;
7605                         /*
7606                          * the refill lock keeps out other
7607                          * people trying to start a new cluster
7608                          */
7609                         used_block_group = btrfs_lock_cluster(block_group,
7610                                                               last_ptr,
7611                                                               delalloc);
7612                         if (!used_block_group)
7613                                 goto refill_cluster;
7614
7615                         if (used_block_group != block_group &&
7616                             (used_block_group->ro ||
7617                              !block_group_bits(used_block_group, flags)))
7618                                 goto release_cluster;
7619
7620                         offset = btrfs_alloc_from_cluster(used_block_group,
7621                                                 last_ptr,
7622                                                 num_bytes,
7623                                                 used_block_group->key.objectid,
7624                                                 &max_extent_size);
7625                         if (offset) {
7626                                 /* we have a block, we're done */
7627                                 spin_unlock(&last_ptr->refill_lock);
7628                                 trace_btrfs_reserve_extent_cluster(root,
7629                                                 used_block_group,
7630                                                 search_start, num_bytes);
7631                                 if (used_block_group != block_group) {
7632                                         btrfs_release_block_group(block_group,
7633                                                                   delalloc);
7634                                         block_group = used_block_group;
7635                                 }
7636                                 goto checks;
7637                         }
7638
7639                         WARN_ON(last_ptr->block_group != used_block_group);
7640 release_cluster:
7641                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7642                          * set up a new cluster, so let's just skip it
7643                          * and let the allocator find whatever block
7644                          * it can find.  If we reach this point, we
7645                          * will have tried the cluster allocator
7646                          * plenty of times and not have found
7647                          * anything, so we are likely way too
7648                          * fragmented for the clustering stuff to find
7649                          * anything.
7650                          *
7651                          * However, if the cluster is taken from the
7652                          * current block group, release the cluster
7653                          * first, so that we stand a better chance of
7654                          * succeeding in the unclustered
7655                          * allocation.  */
7656                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7657                             used_block_group != block_group) {
7658                                 spin_unlock(&last_ptr->refill_lock);
7659                                 btrfs_release_block_group(used_block_group,
7660                                                           delalloc);
7661                                 goto unclustered_alloc;
7662                         }
7663
7664                         /*
7665                          * this cluster didn't work out, free it and
7666                          * start over
7667                          */
7668                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7669
7670                         if (used_block_group != block_group)
7671                                 btrfs_release_block_group(used_block_group,
7672                                                           delalloc);
7673 refill_cluster:
7674                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7675                                 spin_unlock(&last_ptr->refill_lock);
7676                                 goto unclustered_alloc;
7677                         }
7678
7679                         aligned_cluster = max_t(unsigned long,
7680                                                 empty_cluster + empty_size,
7681                                               block_group->full_stripe_len);
7682
7683                         /* allocate a cluster in this block group */
7684                         ret = btrfs_find_space_cluster(root, block_group,
7685                                                        last_ptr, search_start,
7686                                                        num_bytes,
7687                                                        aligned_cluster);
7688                         if (ret == 0) {
7689                                 /*
7690                                  * now pull our allocation out of this
7691                                  * cluster
7692                                  */
7693                                 offset = btrfs_alloc_from_cluster(block_group,
7694                                                         last_ptr,
7695                                                         num_bytes,
7696                                                         search_start,
7697                                                         &max_extent_size);
7698                                 if (offset) {
7699                                         /* we found one, proceed */
7700                                         spin_unlock(&last_ptr->refill_lock);
7701                                         trace_btrfs_reserve_extent_cluster(root,
7702                                                 block_group, search_start,
7703                                                 num_bytes);
7704                                         goto checks;
7705                                 }
7706                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
7707                                    !failed_cluster_refill) {
7708                                 spin_unlock(&last_ptr->refill_lock);
7709
7710                                 failed_cluster_refill = true;
7711                                 wait_block_group_cache_progress(block_group,
7712                                        num_bytes + empty_cluster + empty_size);
7713                                 goto have_block_group;
7714                         }
7715
7716                         /*
7717                          * at this point we either didn't find a cluster
7718                          * or we weren't able to allocate a block from our
7719                          * cluster.  Free the cluster we've been trying
7720                          * to use, and go to the next block group
7721                          */
7722                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7723                         spin_unlock(&last_ptr->refill_lock);
7724                         goto loop;
7725                 }
7726
7727 unclustered_alloc:
7728                 /*
7729                  * We are doing an unclustered alloc, set the fragmented flag so
7730                  * we don't bother trying to set up a cluster again until we get
7731                  * more space.
7732                  */
7733                 if (unlikely(last_ptr)) {
7734                         spin_lock(&last_ptr->lock);
7735                         last_ptr->fragmented = 1;
7736                         spin_unlock(&last_ptr->lock);
7737                 }
7738                 spin_lock(&block_group->free_space_ctl->tree_lock);
7739                 if (cached &&
7740                     block_group->free_space_ctl->free_space <
7741                     num_bytes + empty_cluster + empty_size) {
7742                         if (block_group->free_space_ctl->free_space >
7743                             max_extent_size)
7744                                 max_extent_size =
7745                                         block_group->free_space_ctl->free_space;
7746                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7747                         goto loop;
7748                 }
7749                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7750
7751                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7752                                                     num_bytes, empty_size,
7753                                                     &max_extent_size);
7754                 /*
7755                  * If we didn't find a chunk, and we haven't failed on this
7756                  * block group before, and this block group is in the middle of
7757                  * caching and we are ok with waiting, then go ahead and wait
7758                  * for progress to be made, and set failed_alloc to true.
7759                  *
7760                  * If failed_alloc is true then we've already waited on this
7761                  * block group once and should move on to the next block group.
7762                  */
7763                 if (!offset && !failed_alloc && !cached &&
7764                     loop > LOOP_CACHING_NOWAIT) {
7765                         wait_block_group_cache_progress(block_group,
7766                                                 num_bytes + empty_size);
7767                         failed_alloc = true;
7768                         goto have_block_group;
7769                 } else if (!offset) {
7770                         goto loop;
7771                 }
7772 checks:
7773                 search_start = ALIGN(offset, root->stripesize);
7774
7775                 /* move on to the next group */
7776                 if (search_start + num_bytes >
7777                     block_group->key.objectid + block_group->key.offset) {
7778                         btrfs_add_free_space(block_group, offset, num_bytes);
7779                         goto loop;
7780                 }
7781
7782                 if (offset < search_start)
7783                         btrfs_add_free_space(block_group, offset,
7784                                              search_start - offset);
7785                 BUG_ON(offset > search_start);
7786
7787                 ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
7788                                 num_bytes, delalloc);
7789                 if (ret == -EAGAIN) {
7790                         btrfs_add_free_space(block_group, offset, num_bytes);
7791                         goto loop;
7792                 }
7793                 btrfs_inc_block_group_reservations(block_group);
7794
7795                 /* we are all good, let's return */
7796                 ins->objectid = search_start;
7797                 ins->offset = num_bytes;
7798
7799                 trace_btrfs_reserve_extent(orig_root, block_group,
7800                                            search_start, num_bytes);
7801                 btrfs_release_block_group(block_group, delalloc);
7802                 break;
7803 loop:
7804                 failed_cluster_refill = false;
7805                 failed_alloc = false;
7806                 BUG_ON(index != get_block_group_index(block_group));
7807                 btrfs_release_block_group(block_group, delalloc);
7808         }
7809         up_read(&space_info->groups_sem);
7810
7811         if (loop == LOOP_CACHING_NOWAIT && have_caching_bg &&
7812             !orig_have_caching_bg)
7813                 orig_have_caching_bg = true;
7814
7815         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7816                 goto search;
7817
7818         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7819                 goto search;
7820
7821         /*
7822          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7823          *                      caching kthreads as we move along
7824          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7825          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7826          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7827          *                      again
7828          */
7829         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7830                 index = 0;
7831                 if (loop == LOOP_CACHING_NOWAIT) {
7832                         /*
7833                          * We want to skip the LOOP_CACHING_WAIT step if we
7834                          * don't have any uncached bgs and we've already done a
7835                          * full search through all of the block groups.
7836                          */
7837                         if (orig_have_caching_bg || !full_search)
7838                                 loop = LOOP_CACHING_WAIT;
7839                         else
7840                                 loop = LOOP_ALLOC_CHUNK;
7841                 } else {
7842                         loop++;
7843                 }
7844
7845                 if (loop == LOOP_ALLOC_CHUNK) {
7846                         struct btrfs_trans_handle *trans;
7847                         int exist = 0;
7848
7849                         trans = current->journal_info;
7850                         if (trans)
7851                                 exist = 1;
7852                         else
7853                                 trans = btrfs_join_transaction(root);
7854
7855                         if (IS_ERR(trans)) {
7856                                 ret = PTR_ERR(trans);
7857                                 goto out;
7858                         }
7859
7860                         ret = do_chunk_alloc(trans, root, flags,
7861                                              CHUNK_ALLOC_FORCE);
7862
7863                         /*
7864                          * If we can't allocate a new chunk we've already looped
7865                          * through at least once, move on to the NO_EMPTY_SIZE
7866                          * case.
7867                          */
7868                         if (ret == -ENOSPC)
7869                                 loop = LOOP_NO_EMPTY_SIZE;
7870
7871                         /*
7872                          * Do not bail out on ENOSPC, since we can
7873                          * still make progress in the NO_EMPTY_SIZE case.
7874                          */
7875                         if (ret < 0 && ret != -ENOSPC)
7876                                 btrfs_abort_transaction(trans, ret);
7877                         else
7878                                 ret = 0;
7879                         if (!exist)
7880                                 btrfs_end_transaction(trans, root);
7881                         if (ret)
7882                                 goto out;
7883                 }
7884
7885                 if (loop == LOOP_NO_EMPTY_SIZE) {
7886                         /*
7887                          * Don't loop again if we already have no empty_size and
7888                          * no empty_cluster.
7889                          */
7890                         if (empty_size == 0 &&
7891                             empty_cluster == 0) {
7892                                 ret = -ENOSPC;
7893                                 goto out;
7894                         }
7895                         empty_size = 0;
7896                         empty_cluster = 0;
7897                 }
7898
7899                 goto search;
7900         } else if (!ins->objectid) {
7901                 ret = -ENOSPC;
7902         } else if (ins->objectid) {
7903                 if (!use_cluster && last_ptr) {
7904                         spin_lock(&last_ptr->lock);
7905                         last_ptr->window_start = ins->objectid;
7906                         spin_unlock(&last_ptr->lock);
7907                 }
7908                 ret = 0;
7909         }
7910 out:
7911         if (ret == -ENOSPC) {
7912                 spin_lock(&space_info->lock);
7913                 space_info->max_extent_size = max_extent_size;
7914                 spin_unlock(&space_info->lock);
7915                 ins->offset = max_extent_size;
7916         }
7917         return ret;
7918 }
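
/*
 * Editor's illustration, not part of the kernel source: the loop
 * escalation policy coded inline above, restated as a hypothetical
 * helper.  The name next_alloc_loop() and both bool parameters are
 * assumptions of this sketch only; the real code mutates 'loop' inline.
 */
static inline int next_alloc_loop(int loop, bool have_caching_bg,
                                  bool full_search)
{
        if (loop == LOOP_CACHING_NOWAIT) {
                /* skip LOOP_CACHING_WAIT if a full pass saw no uncached bgs */
                if (have_caching_bg || !full_search)
                        return LOOP_CACHING_WAIT;
                return LOOP_ALLOC_CHUNK;
        }
        return loop + 1;        /* WAIT -> ALLOC_CHUNK -> NO_EMPTY_SIZE */
}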
7919
7920 static void dump_space_info(struct btrfs_fs_info *fs_info,
7921                             struct btrfs_space_info *info, u64 bytes,
7922                             int dump_block_groups)
7923 {
7924         struct btrfs_block_group_cache *cache;
7925         int index = 0;
7926
7927         spin_lock(&info->lock);
7928         btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
7929                    info->flags,
7930                    info->total_bytes - info->bytes_used - info->bytes_pinned -
7931                    info->bytes_reserved - info->bytes_readonly -
7932                    info->bytes_may_use, (info->full) ? "" : "not ");
7933         btrfs_info(fs_info,
7934                 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
7935                 info->total_bytes, info->bytes_used, info->bytes_pinned,
7936                 info->bytes_reserved, info->bytes_may_use,
7937                 info->bytes_readonly);
7938         spin_unlock(&info->lock);
7939
7940         if (!dump_block_groups)
7941                 return;
7942
7943         down_read(&info->groups_sem);
7944 again:
7945         list_for_each_entry(cache, &info->block_groups[index], list) {
7946                 spin_lock(&cache->lock);
7947                 btrfs_info(fs_info,
7948                         "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
7949                         cache->key.objectid, cache->key.offset,
7950                         btrfs_block_group_used(&cache->item), cache->pinned,
7951                         cache->reserved, cache->ro ? "[readonly]" : "");
7952                 btrfs_dump_free_space(cache, bytes);
7953                 spin_unlock(&cache->lock);
7954         }
7955         if (++index < BTRFS_NR_RAID_TYPES)
7956                 goto again;
7957         up_read(&info->groups_sem);
7958 }
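
/*
 * Editor's sketch (hypothetical helper name): the "free" figure printed
 * by dump_space_info() above is derived exactly like this, computed
 * under info->lock in the real code.
 */
static inline u64 space_info_free(struct btrfs_space_info *info)
{
        return info->total_bytes - info->bytes_used - info->bytes_pinned -
               info->bytes_reserved - info->bytes_readonly -
               info->bytes_may_use;
}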
7959
7960 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
7961                          u64 num_bytes, u64 min_alloc_size,
7962                          u64 empty_size, u64 hint_byte,
7963                          struct btrfs_key *ins, int is_data, int delalloc)
7964 {
7965         struct btrfs_fs_info *fs_info = root->fs_info;
7966         bool final_tried = num_bytes == min_alloc_size;
7967         u64 flags;
7968         int ret;
7969
7970         flags = btrfs_get_alloc_profile(root, is_data);
7971 again:
7972         WARN_ON(num_bytes < root->sectorsize);
7973         ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
7974                                hint_byte, ins, flags, delalloc);
7975         if (!ret && !is_data) {
7976                 btrfs_dec_block_group_reservations(fs_info, ins->objectid);
7977         } else if (ret == -ENOSPC) {
7978                 if (!final_tried && ins->offset) {
7979                         num_bytes = min(num_bytes >> 1, ins->offset);
7980                         num_bytes = round_down(num_bytes, root->sectorsize);
7981                         num_bytes = max(num_bytes, min_alloc_size);
7982                         ram_bytes = num_bytes;
7983                         if (num_bytes == min_alloc_size)
7984                                 final_tried = true;
7985                         goto again;
7986                 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
7987                         struct btrfs_space_info *sinfo;
7988
7989                         sinfo = __find_space_info(fs_info, flags);
7990                         btrfs_err(root->fs_info,
7991                                   "allocation failed flags %llu, wanted %llu",
7992                                   flags, num_bytes);
7993                         if (sinfo)
7994                                 dump_space_info(fs_info, sinfo, num_bytes, 1);
7995                 }
7996         }
7997
7998         return ret;
7999 }
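
/*
 * Editor's sketch of the ENOSPC retry in btrfs_reserve_extent() above
 * (hypothetical helper name): halve the request, bound it by the
 * largest free extent seen (returned in ins->offset), round down to
 * the sector size, and never go below min_alloc_size.  E.g. with a 4K
 * sectorsize, 64K min_alloc_size and a 200K max extent, a 1M request
 * retries at 200K, 100K and finally 64K.
 */
static inline u64 shrink_alloc_request(u64 num_bytes, u64 max_extent_size,
                                       u64 min_alloc_size, u32 sectorsize)
{
        num_bytes = min(num_bytes >> 1, max_extent_size);
        num_bytes = round_down(num_bytes, sectorsize);
        return max(num_bytes, min_alloc_size);
}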
8000
8001 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
8002                                         u64 start, u64 len,
8003                                         int pin, int delalloc)
8004 {
8005         struct btrfs_block_group_cache *cache;
8006         int ret = 0;
8007
8008         cache = btrfs_lookup_block_group(root->fs_info, start);
8009         if (!cache) {
8010                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
8011                         start);
8012                 return -ENOSPC;
8013         }
8014
8015         if (pin)
8016                 pin_down_extent(root, cache, start, len, 1);
8017         else {
8018                 if (btrfs_test_opt(root->fs_info, DISCARD))
8019                         ret = btrfs_discard_extent(root, start, len, NULL);
8020                 btrfs_add_free_space(cache, start, len);
8021                 btrfs_free_reserved_bytes(cache, len, delalloc);
8022                 trace_btrfs_reserved_extent_free(root, start, len);
8023         }
8024
8025         btrfs_put_block_group(cache);
8026         return ret;
8027 }
8028
8029 int btrfs_free_reserved_extent(struct btrfs_root *root,
8030                                u64 start, u64 len, int delalloc)
8031 {
8032         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
8033 }
8034
8035 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
8036                                        u64 start, u64 len)
8037 {
8038         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
8039 }
8040
8041 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8042                                       struct btrfs_root *root,
8043                                       u64 parent, u64 root_objectid,
8044                                       u64 flags, u64 owner, u64 offset,
8045                                       struct btrfs_key *ins, int ref_mod)
8046 {
8047         int ret;
8048         struct btrfs_fs_info *fs_info = root->fs_info;
8049         struct btrfs_extent_item *extent_item;
8050         struct btrfs_extent_inline_ref *iref;
8051         struct btrfs_path *path;
8052         struct extent_buffer *leaf;
8053         int type;
8054         u32 size;
8055
8056         if (parent > 0)
8057                 type = BTRFS_SHARED_DATA_REF_KEY;
8058         else
8059                 type = BTRFS_EXTENT_DATA_REF_KEY;
8060
8061         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
8062
8063         path = btrfs_alloc_path();
8064         if (!path)
8065                 return -ENOMEM;
8066
8067         path->leave_spinning = 1;
8068         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8069                                       ins, size);
8070         if (ret) {
8071                 btrfs_free_path(path);
8072                 return ret;
8073         }
8074
8075         leaf = path->nodes[0];
8076         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8077                                      struct btrfs_extent_item);
8078         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
8079         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8080         btrfs_set_extent_flags(leaf, extent_item,
8081                                flags | BTRFS_EXTENT_FLAG_DATA);
8082
8083         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8084         btrfs_set_extent_inline_ref_type(leaf, iref, type);
8085         if (parent > 0) {
8086                 struct btrfs_shared_data_ref *ref;
8087                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
8088                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8089                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
8090         } else {
8091                 struct btrfs_extent_data_ref *ref;
8092                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
8093                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
8094                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
8095                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
8096                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
8097         }
8098
8099         btrfs_mark_buffer_dirty(path->nodes[0]);
8100         btrfs_free_path(path);
8101
8102         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8103                                           ins->offset);
8104         if (ret)
8105                 return ret;
8106
8107         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
8108         if (ret) { /* -ENOENT, logic error */
8109                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8110                         ins->objectid, ins->offset);
8111                 BUG();
8112         }
8113         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
8114         return ret;
8115 }
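
/*
 * Editor's sketch (hypothetical helper name): how the item size used by
 * alloc_reserved_file_extent() above is chosen - one extent item plus a
 * single inline ref, whose size depends on whether the ref is shared
 * (keyed by parent bytenr) or keyed by root/owner/offset.
 */
static inline u32 data_extent_item_size(u64 parent)
{
        int type = parent > 0 ? BTRFS_SHARED_DATA_REF_KEY :
                                BTRFS_EXTENT_DATA_REF_KEY;

        return sizeof(struct btrfs_extent_item) +
               btrfs_extent_inline_ref_size(type);
}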
8116
8117 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
8118                                      struct btrfs_root *root,
8119                                      u64 parent, u64 root_objectid,
8120                                      u64 flags, struct btrfs_disk_key *key,
8121                                      int level, struct btrfs_key *ins)
8122 {
8123         int ret;
8124         struct btrfs_fs_info *fs_info = root->fs_info;
8125         struct btrfs_extent_item *extent_item;
8126         struct btrfs_tree_block_info *block_info;
8127         struct btrfs_extent_inline_ref *iref;
8128         struct btrfs_path *path;
8129         struct extent_buffer *leaf;
8130         u32 size = sizeof(*extent_item) + sizeof(*iref);
8131         u64 num_bytes = ins->offset;
8132         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
8133                                                  SKINNY_METADATA);
8134
8135         if (!skinny_metadata)
8136                 size += sizeof(*block_info);
8137
8138         path = btrfs_alloc_path();
8139         if (!path) {
8140                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
8141                                                    root->nodesize);
8142                 return -ENOMEM;
8143         }
8144
8145         path->leave_spinning = 1;
8146         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8147                                       ins, size);
8148         if (ret) {
8149                 btrfs_free_path(path);
8150                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
8151                                                    root->nodesize);
8152                 return ret;
8153         }
8154
8155         leaf = path->nodes[0];
8156         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8157                                      struct btrfs_extent_item);
8158         btrfs_set_extent_refs(leaf, extent_item, 1);
8159         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8160         btrfs_set_extent_flags(leaf, extent_item,
8161                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
8162
8163         if (skinny_metadata) {
8164                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8165                 num_bytes = root->nodesize;
8166         } else {
8167                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
8168                 btrfs_set_tree_block_key(leaf, block_info, key);
8169                 btrfs_set_tree_block_level(leaf, block_info, level);
8170                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
8171         }
8172
8173         if (parent > 0) {
8174                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
8175                 btrfs_set_extent_inline_ref_type(leaf, iref,
8176                                                  BTRFS_SHARED_BLOCK_REF_KEY);
8177                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8178         } else {
8179                 btrfs_set_extent_inline_ref_type(leaf, iref,
8180                                                  BTRFS_TREE_BLOCK_REF_KEY);
8181                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
8182         }
8183
8184         btrfs_mark_buffer_dirty(leaf);
8185         btrfs_free_path(path);
8186
8187         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8188                                           num_bytes);
8189         if (ret)
8190                 return ret;
8191
8192         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
8193                                  1);
8194         if (ret) { /* -ENOENT, logic error */
8195                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8196                         ins->objectid, ins->offset);
8197                 BUG();
8198         }
8199
8200         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
8201         return ret;
8202 }
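
/*
 * Editor's sketch (hypothetical helper name): the item size logic of
 * alloc_reserved_tree_block() above.  With the SKINNY_METADATA incompat
 * bit the block level travels in the key, so the separate
 * btrfs_tree_block_info structure is dropped from the item.
 */
static inline u32 tree_extent_item_size(bool skinny_metadata)
{
        u32 size = sizeof(struct btrfs_extent_item) +
                   sizeof(struct btrfs_extent_inline_ref);

        if (!skinny_metadata)
                size += sizeof(struct btrfs_tree_block_info);
        return size;
}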
8203
8204 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8205                                      struct btrfs_root *root,
8206                                      u64 root_objectid, u64 owner,
8207                                      u64 offset, u64 ram_bytes,
8208                                      struct btrfs_key *ins)
8209 {
8210         int ret;
8211
8212         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
8213
8214         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
8215                                          ins->offset, 0,
8216                                          root_objectid, owner, offset,
8217                                          ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
8218                                          NULL);
8219         return ret;
8220 }
8221
8222 /*
8223  * this is used by the tree logging recovery code.  It records that
8224  * an extent has been allocated and makes sure to clear the free
8225  * space cache bits as well
8226  */
8227 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
8228                                    struct btrfs_root *root,
8229                                    u64 root_objectid, u64 owner, u64 offset,
8230                                    struct btrfs_key *ins)
8231 {
8232         int ret;
8233         struct btrfs_block_group_cache *block_group;
8234         struct btrfs_space_info *space_info;
8235
8236         /*
8237          * Mixed block groups get excluded before log processing, so we only
8238          * need to do the exclude dance if this fs isn't mixed.
8239          */
8240         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
8241                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
8242                 if (ret)
8243                         return ret;
8244         }
8245
8246         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
8247         if (!block_group)
8248                 return -EINVAL;
8249
8250         space_info = block_group->space_info;
8251         spin_lock(&space_info->lock);
8252         spin_lock(&block_group->lock);
8253         space_info->bytes_reserved += ins->offset;
8254         block_group->reserved += ins->offset;
8255         spin_unlock(&block_group->lock);
8256         spin_unlock(&space_info->lock);
8257
8258         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
8259                                          0, owner, offset, ins, 1);
8260         btrfs_put_block_group(block_group);
8261         return ret;
8262 }
8263
8264 static struct extent_buffer *
8265 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8266                       u64 bytenr, int level)
8267 {
8268         struct extent_buffer *buf;
8269
8270         buf = btrfs_find_create_tree_block(root, bytenr);
8271         if (IS_ERR(buf))
8272                 return buf;
8273
8274         /*
8275          * Extra safety check in case the extent tree is corrupted and extent
8276          * allocator chooses to use a tree block which is already used and
8277          * locked.
8278          */
8279         if (buf->lock_owner == current->pid) {
8280                 btrfs_err_rl(root->fs_info,
8281 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
8282                         buf->start, btrfs_header_owner(buf), current->pid);
8283                 free_extent_buffer(buf);
8284                 return ERR_PTR(-EUCLEAN);
8285         }
8286
8287         btrfs_set_header_generation(buf, trans->transid);
8288         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
8289         btrfs_tree_lock(buf);
8290         clean_tree_block(trans, root->fs_info, buf);
8291         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
8292
8293         btrfs_set_lock_blocking(buf);
8294         set_extent_buffer_uptodate(buf);
8295
8296         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8297                 buf->log_index = root->log_transid % 2;
8298                 /*
8299                  * we allow two log transactions at a time, use different
8300                  * EXTENT bits to differentiate dirty pages.
8301                  */
8302                 if (buf->log_index == 0)
8303                         set_extent_dirty(&root->dirty_log_pages, buf->start,
8304                                         buf->start + buf->len - 1, GFP_NOFS);
8305                 else
8306                         set_extent_new(&root->dirty_log_pages, buf->start,
8307                                         buf->start + buf->len - 1);
8308         } else {
8309                 buf->log_index = -1;
8310                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
8311                          buf->start + buf->len - 1, GFP_NOFS);
8312         }
8313         trans->dirty = true;
8314         /* this returns a buffer locked for blocking */
8315         return buf;
8316 }
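
/*
 * Editor's sketch (hypothetical helper name): the log-tree branch in
 * btrfs_init_new_buffer() above keys the extent bit off the parity of
 * the log transid, so the two in-flight log transactions can track
 * their dirty pages independently in one io tree.
 */
static inline bool log_buffer_uses_dirty_bit(u64 log_transid)
{
        /* even transid -> EXTENT_DIRTY, odd transid -> EXTENT_NEW */
        return (log_transid % 2) == 0;
}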
8317
8318 static struct btrfs_block_rsv *
8319 use_block_rsv(struct btrfs_trans_handle *trans,
8320               struct btrfs_root *root, u32 blocksize)
8321 {
8322         struct btrfs_block_rsv *block_rsv;
8323         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
8324         int ret;
8325         bool global_updated = false;
8326
8327         block_rsv = get_block_rsv(trans, root);
8328
8329         if (unlikely(block_rsv->size == 0))
8330                 goto try_reserve;
8331 again:
8332         ret = block_rsv_use_bytes(block_rsv, blocksize);
8333         if (!ret)
8334                 return block_rsv;
8335
8336         if (block_rsv->failfast)
8337                 return ERR_PTR(ret);
8338
8339         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
8340                 global_updated = true;
8341                 update_global_block_rsv(root->fs_info);
8342                 goto again;
8343         }
8344
8345         if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
8346                 static DEFINE_RATELIMIT_STATE(_rs,
8347                                 DEFAULT_RATELIMIT_INTERVAL * 10,
8348                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
8349                 if (__ratelimit(&_rs))
8350                         WARN(1, KERN_DEBUG
8351                                 "BTRFS: block rsv returned %d\n", ret);
8352         }
8353 try_reserve:
8354         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
8355                                      BTRFS_RESERVE_NO_FLUSH);
8356         if (!ret)
8357                 return block_rsv;
8358         /*
8359          * If we couldn't reserve metadata bytes, try to use some from
8360          * the global reserve, provided this rsv's space info is the same
8361          * as the global reserve's.
8362          */
8363         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
8364             block_rsv->space_info == global_rsv->space_info) {
8365                 ret = block_rsv_use_bytes(global_rsv, blocksize);
8366                 if (!ret)
8367                         return global_rsv;
8368         }
8369         return ERR_PTR(ret);
8370 }
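
/*
 * Editor's illustration: the order in which use_block_rsv() above tries
 * its sources.  This enum is documentation only, not a kernel type.
 */
enum rsv_fallback_order {
        RSV_OWN_BYTES,          /* block_rsv_use_bytes(block_rsv) */
        RSV_REFRESH_GLOBAL,     /* update_global_block_rsv() once, retry */
        RSV_FRESH_RESERVE,      /* reserve_metadata_bytes(NO_FLUSH) */
        RSV_STEAL_GLOBAL,       /* block_rsv_use_bytes(global_rsv), only if
                                   the space_info matches */
};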
8371
8372 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
8373                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
8374 {
8375         block_rsv_add_bytes(block_rsv, blocksize, 0);
8376         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
8377 }
8378
8379 /*
8380  * finds a free extent and does all the dirty work required for allocation.
8381  * returns the tree buffer or an ERR_PTR on error.
8382  */
8383 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
8384                                         struct btrfs_root *root,
8385                                         u64 parent, u64 root_objectid,
8386                                         struct btrfs_disk_key *key, int level,
8387                                         u64 hint, u64 empty_size)
8388 {
8389         struct btrfs_key ins;
8390         struct btrfs_block_rsv *block_rsv;
8391         struct extent_buffer *buf;
8392         struct btrfs_delayed_extent_op *extent_op;
8393         u64 flags = 0;
8394         int ret;
8395         u32 blocksize = root->nodesize;
8396         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
8397                                                  SKINNY_METADATA);
8398
8399 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8400         if (btrfs_is_testing(root->fs_info)) {
8401                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
8402                                             level);
8403                 if (!IS_ERR(buf))
8404                         root->alloc_bytenr += blocksize;
8405                 return buf;
8406         }
8407 #endif
8408
8409         block_rsv = use_block_rsv(trans, root, blocksize);
8410         if (IS_ERR(block_rsv))
8411                 return ERR_CAST(block_rsv);
8412
8413         ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
8414                                    empty_size, hint, &ins, 0, 0);
8415         if (ret)
8416                 goto out_unuse;
8417
8418         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
8419         if (IS_ERR(buf)) {
8420                 ret = PTR_ERR(buf);
8421                 goto out_free_reserved;
8422         }
8423
8424         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8425                 if (parent == 0)
8426                         parent = ins.objectid;
8427                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8428         } else
8429                 BUG_ON(parent > 0);
8430
8431         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8432                 extent_op = btrfs_alloc_delayed_extent_op();
8433                 if (!extent_op) {
8434                         ret = -ENOMEM;
8435                         goto out_free_buf;
8436                 }
8437                 if (key)
8438                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
8439                 else
8440                         memset(&extent_op->key, 0, sizeof(extent_op->key));
8441                 extent_op->flags_to_set = flags;
8442                 extent_op->update_key = skinny_metadata ? false : true;
8443                 extent_op->update_flags = true;
8444                 extent_op->is_data = false;
8445                 extent_op->level = level;
8446
8447                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
8448                                                  ins.objectid, ins.offset,
8449                                                  parent, root_objectid, level,
8450                                                  BTRFS_ADD_DELAYED_EXTENT,
8451                                                  extent_op);
8452                 if (ret)
8453                         goto out_free_delayed;
8454         }
8455         return buf;
8456
8457 out_free_delayed:
8458         btrfs_free_delayed_extent_op(extent_op);
8459 out_free_buf:
8460         btrfs_tree_unlock(buf);
8461         free_extent_buffer(buf);
8462 out_free_reserved:
8463         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8464 out_unuse:
8465         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8466         return ERR_PTR(ret);
8467 }
8468
8469 struct walk_control {
8470         u64 refs[BTRFS_MAX_LEVEL];
8471         u64 flags[BTRFS_MAX_LEVEL];
8472         struct btrfs_key update_progress;
8473         int stage;
8474         int level;
8475         int shared_level;
8476         int update_ref;
8477         int keep_locks;
8478         int reada_slot;
8479         int reada_count;
8480         int for_reloc;
8481 };
8482
8483 #define DROP_REFERENCE  1
8484 #define UPDATE_BACKREF  2
8485
8486 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8487                                      struct btrfs_root *root,
8488                                      struct walk_control *wc,
8489                                      struct btrfs_path *path)
8490 {
8491         u64 bytenr;
8492         u64 generation;
8493         u64 refs;
8494         u64 flags;
8495         u32 nritems;
8496         struct btrfs_key key;
8497         struct extent_buffer *eb;
8498         int ret;
8499         int slot;
8500         int nread = 0;
8501
8502         if (path->slots[wc->level] < wc->reada_slot) {
8503                 wc->reada_count = wc->reada_count * 2 / 3;
8504                 wc->reada_count = max(wc->reada_count, 2);
8505         } else {
8506                 wc->reada_count = wc->reada_count * 3 / 2;
8507                 wc->reada_count = min_t(int, wc->reada_count,
8508                                         BTRFS_NODEPTRS_PER_BLOCK(root));
8509         }
8510
8511         eb = path->nodes[wc->level];
8512         nritems = btrfs_header_nritems(eb);
8513
8514         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8515                 if (nread >= wc->reada_count)
8516                         break;
8517
8518                 cond_resched();
8519                 bytenr = btrfs_node_blockptr(eb, slot);
8520                 generation = btrfs_node_ptr_generation(eb, slot);
8521
8522                 if (slot == path->slots[wc->level])
8523                         goto reada;
8524
8525                 if (wc->stage == UPDATE_BACKREF &&
8526                     generation <= root->root_key.offset)
8527                         continue;
8528
8529                 /* We don't lock the tree block, it's OK to be racy here */
8530                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8531                                                wc->level - 1, 1, &refs,
8532                                                &flags);
8533                 /* We don't care about errors in readahead. */
8534                 if (ret < 0)
8535                         continue;
8536                 BUG_ON(refs == 0);
8537
8538                 if (wc->stage == DROP_REFERENCE) {
8539                         if (refs == 1)
8540                                 goto reada;
8541
8542                         if (wc->level == 1 &&
8543                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8544                                 continue;
8545                         if (!wc->update_ref ||
8546                             generation <= root->root_key.offset)
8547                                 continue;
8548                         btrfs_node_key_to_cpu(eb, &key, slot);
8549                         ret = btrfs_comp_cpu_keys(&key,
8550                                                   &wc->update_progress);
8551                         if (ret < 0)
8552                                 continue;
8553                 } else {
8554                         if (wc->level == 1 &&
8555                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8556                                 continue;
8557                 }
8558 reada:
8559                 readahead_tree_block(root, bytenr);
8560                 nread++;
8561         }
8562         wc->reada_slot = slot;
8563 }
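
/*
 * Editor's sketch (hypothetical helper name): the adaptive readahead
 * window in reada_walk_down() above - shrink to 2/3 (floor 2) when the
 * walk revisits earlier slots, grow by 3/2 capped at the node fanout
 * (BTRFS_NODEPTRS_PER_BLOCK) when it advances.
 */
static inline int adapt_reada_count(int count, bool revisiting, int fanout)
{
        if (revisiting)
                return max(count * 2 / 3, 2);
        return min(count * 3 / 2, fanout);
}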
8564
8565 static int account_leaf_items(struct btrfs_trans_handle *trans,
8566                               struct btrfs_root *root,
8567                               struct extent_buffer *eb)
8568 {
8569         int nr = btrfs_header_nritems(eb);
8570         int i, extent_type, ret;
8571         struct btrfs_key key;
8572         struct btrfs_file_extent_item *fi;
8573         u64 bytenr, num_bytes;
8574
8575         /* We can be called directly from walk_up_proc() */
8576         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
8577                 return 0;
8578
8579         for (i = 0; i < nr; i++) {
8580                 btrfs_item_key_to_cpu(eb, &key, i);
8581
8582                 if (key.type != BTRFS_EXTENT_DATA_KEY)
8583                         continue;
8584
8585                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8586                 /* filter out non-qgroup-accountable extents */
8587                 extent_type = btrfs_file_extent_type(eb, fi);
8588
8589                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8590                         continue;
8591
8592                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8593                 if (!bytenr)
8594                         continue;
8595
8596                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8597
8598                 ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
8599                                 bytenr, num_bytes, GFP_NOFS);
8600                 if (ret)
8601                         return ret;
8602         }
8603         return 0;
8604 }
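
/*
 * Editor's sketch (hypothetical helper name): the per-item filter
 * applied by account_leaf_items() above.  Only file extents with a real
 * disk bytenr feed qgroup accounting; inline extents and holes
 * (disk_bytenr == 0) are skipped.
 */
static inline bool qgroup_accountable(int extent_type, u64 disk_bytenr)
{
        return extent_type != BTRFS_FILE_EXTENT_INLINE && disk_bytenr != 0;
}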
8605
8606 /*
8607  * Walk up the tree from the bottom, freeing leaves and any interior
8608  * nodes which have had all slots visited. If a node (leaf or
8609  * interior) is freed, the node above it will have its slot
8610  * incremented. The root node will never be freed.
8611  *
8612  * At the end of this function, we should have a path which has all
8613  * slots incremented to the next position for a search. If we need to
8614  * read a new node it will be NULL and the node above it will have the
8615  * correct slot selected for a later read.
8616  *
8617  * If we increment the root node's slot counter past the number of
8618  * elements, 1 is returned to signal completion of the search.
8619  */
8620 static int adjust_slots_upwards(struct btrfs_root *root,
8621                                 struct btrfs_path *path, int root_level)
8622 {
8623         int level = 0;
8624         int nr, slot;
8625         struct extent_buffer *eb;
8626
8627         if (root_level == 0)
8628                 return 1;
8629
8630         while (level <= root_level) {
8631                 eb = path->nodes[level];
8632                 nr = btrfs_header_nritems(eb);
8633                 path->slots[level]++;
8634                 slot = path->slots[level];
8635                 if (slot >= nr || level == 0) {
8636                         /*
8637                          * Don't free the root - we will detect this
8638                          * condition after our loop and return a
8639                          * positive value for caller to stop walking the tree.
8640                          */
8641                         if (level != root_level) {
8642                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8643                                 path->locks[level] = 0;
8644
8645                                 free_extent_buffer(eb);
8646                                 path->nodes[level] = NULL;
8647                                 path->slots[level] = 0;
8648                         }
8649                 } else {
8650                         /*
8651                          * We have a valid slot to walk back down
8652                          * from. Stop here so caller can process these
8653                          * new nodes.
8654                          */
8655                         break;
8656                 }
8657
8658                 level++;
8659         }
8660
8661         eb = path->nodes[root_level];
8662         if (path->slots[root_level] >= btrfs_header_nritems(eb))
8663                 return 1;
8664
8665         return 0;
8666 }
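
/*
 * Editor's worked example for adjust_slots_upwards() above: with
 * root_level == 2, the leaf is always unlocked and freed (level 0
 * never has a slot to walk back down to) and slots[1] is bumped; if
 * the level-1 node is then past its last pointer it is freed as well
 * and slots[2] is bumped.  Only when slots[2] passes the root's
 * nritems does the function return 1 to end the search.
 */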
8667
8668 /*
8669  * root_eb is the subtree root and is locked before this function is called.
8670  */
8671 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8672                                   struct btrfs_root *root,
8673                                   struct extent_buffer *root_eb,
8674                                   u64 root_gen,
8675                                   int root_level)
8676 {
8677         int ret = 0;
8678         int level;
8679         struct extent_buffer *eb = root_eb;
8680         struct btrfs_path *path = NULL;
8681
8682         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8683         BUG_ON(root_eb == NULL);
8684
8685         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
8686                 return 0;
8687
8688         if (!extent_buffer_uptodate(root_eb)) {
8689                 ret = btrfs_read_buffer(root_eb, root_gen);
8690                 if (ret)
8691                         goto out;
8692         }
8693
8694         if (root_level == 0) {
8695                 ret = account_leaf_items(trans, root, root_eb);
8696                 goto out;
8697         }
8698
8699         path = btrfs_alloc_path();
8700         if (!path)
8701                 return -ENOMEM;
8702
8703         /*
8704          * Walk down the tree.  Missing extent blocks are filled in as
8705          * we go. Metadata is accounted every time we read a new
8706          * extent block.
8707          *
8708          * When we reach a leaf, we account for file extent items in it,
8709          * walk back up the tree (adjusting slot pointers as we go)
8710          * and restart the search process.
8711          */
8712         extent_buffer_get(root_eb); /* For path */
8713         path->nodes[root_level] = root_eb;
8714         path->slots[root_level] = 0;
8715         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8716 walk_down:
8717         level = root_level;
8718         while (level >= 0) {
8719                 if (path->nodes[level] == NULL) {
8720                         int parent_slot;
8721                         u64 child_gen;
8722                         u64 child_bytenr;
8723
8724                         /* We need to get child blockptr/gen from
8725                          * parent before we can read it. */
8726                         eb = path->nodes[level + 1];
8727                         parent_slot = path->slots[level + 1];
8728                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8729                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8730
8731                         eb = read_tree_block(root, child_bytenr, child_gen);
8732                         if (IS_ERR(eb)) {
8733                                 ret = PTR_ERR(eb);
8734                                 goto out;
8735                         } else if (!extent_buffer_uptodate(eb)) {
8736                                 free_extent_buffer(eb);
8737                                 ret = -EIO;
8738                                 goto out;
8739                         }
8740
8741                         path->nodes[level] = eb;
8742                         path->slots[level] = 0;
8743
8744                         btrfs_tree_read_lock(eb);
8745                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8746                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8747
8748                         ret = btrfs_qgroup_insert_dirty_extent(trans,
8749                                         root->fs_info, child_bytenr,
8750                                         root->nodesize, GFP_NOFS);
8751                         if (ret)
8752                                 goto out;
8753                 }
8754
8755                 if (level == 0) {
8756                         ret = account_leaf_items(trans, root, path->nodes[level]);
8757                         if (ret)
8758                                 goto out;
8759
8760                         /* Nonzero return here means we completed our search */
8761                         ret = adjust_slots_upwards(root, path, root_level);
8762                         if (ret)
8763                                 break;
8764
8765                         /* Restart search with new slots */
8766                         goto walk_down;
8767                 }
8768
8769                 level--;
8770         }
8771
8772         ret = 0;
8773 out:
8774         btrfs_free_path(path);
8775
8776         return ret;
8777 }
8778
8779 /*
8780  * helper to process tree block while walking down the tree.
8781  *
8782  * when wc->stage == UPDATE_BACKREF, this function updates
8783  * back refs for pointers in the block.
8784  *
8785  * NOTE: return value 1 means we should stop walking down.
8786  */
8787 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8788                                    struct btrfs_root *root,
8789                                    struct btrfs_path *path,
8790                                    struct walk_control *wc, int lookup_info)
8791 {
8792         int level = wc->level;
8793         struct extent_buffer *eb = path->nodes[level];
8794         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8795         int ret;
8796
8797         if (wc->stage == UPDATE_BACKREF &&
8798             btrfs_header_owner(eb) != root->root_key.objectid)
8799                 return 1;
8800
8801         /*
8802          * when the reference count of a tree block is 1, it won't increase
8803          * again. once the full backref flag is set, we never clear it.
8804          */
8805         if (lookup_info &&
8806             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8807              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8808                 BUG_ON(!path->locks[level]);
8809                 ret = btrfs_lookup_extent_info(trans, root,
8810                                                eb->start, level, 1,
8811                                                &wc->refs[level],
8812                                                &wc->flags[level]);
8813                 BUG_ON(ret == -ENOMEM);
8814                 if (ret)
8815                         return ret;
8816                 BUG_ON(wc->refs[level] == 0);
8817         }
8818
8819         if (wc->stage == DROP_REFERENCE) {
8820                 if (wc->refs[level] > 1)
8821                         return 1;
8822
8823                 if (path->locks[level] && !wc->keep_locks) {
8824                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8825                         path->locks[level] = 0;
8826                 }
8827                 return 0;
8828         }
8829
8830         /* wc->stage == UPDATE_BACKREF */
8831         if (!(wc->flags[level] & flag)) {
8832                 BUG_ON(!path->locks[level]);
8833                 ret = btrfs_inc_ref(trans, root, eb, 1);
8834                 BUG_ON(ret); /* -ENOMEM */
8835                 ret = btrfs_dec_ref(trans, root, eb, 0);
8836                 BUG_ON(ret); /* -ENOMEM */
8837                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8838                                                   eb->len, flag,
8839                                                   btrfs_header_level(eb), 0);
8840                 BUG_ON(ret); /* -ENOMEM */
8841                 wc->flags[level] |= flag;
8842         }
8843
8844         /*
8845          * the block is shared by multiple trees, so it's not good to
8846          * keep the tree lock held
8847          */
8848         if (path->locks[level] && level > 0) {
8849                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8850                 path->locks[level] = 0;
8851         }
8852         return 0;
8853 }
8854
8855 /*
8856  * helper to process tree block pointer.
8857  *
8858  * when wc->stage == DROP_REFERENCE, this function checks the
8859  * reference count of the block pointed to. if the block
8860  * is shared and we need to update back refs for the subtree
8861  * rooted at the block, this function changes wc->stage to
8862  * UPDATE_BACKREF. if the block is shared and there is no
8863  * need to update back refs, this function drops the reference
8864  * to the block.
8865  *
8866  * NOTE: return value 1 means we should stop walking down.
8867  */
8868 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8869                                  struct btrfs_root *root,
8870                                  struct btrfs_path *path,
8871                                  struct walk_control *wc, int *lookup_info)
8872 {
8873         u64 bytenr;
8874         u64 generation;
8875         u64 parent;
8876         u32 blocksize;
8877         struct btrfs_key key;
8878         struct extent_buffer *next;
8879         int level = wc->level;
8880         int reada = 0;
8881         int ret = 0;
8882         bool need_account = false;
8883
8884         generation = btrfs_node_ptr_generation(path->nodes[level],
8885                                                path->slots[level]);
8886         /*
8887          * if the lower level block was created before the snapshot
8888          * was created, we know there is no need to update back refs
8889          * for the subtree
8890          */
8891         if (wc->stage == UPDATE_BACKREF &&
8892             generation <= root->root_key.offset) {
8893                 *lookup_info = 1;
8894                 return 1;
8895         }
8896
8897         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8898         blocksize = root->nodesize;
8899
8900         next = btrfs_find_tree_block(root->fs_info, bytenr);
8901         if (!next) {
8902                 next = btrfs_find_create_tree_block(root, bytenr);
8903                 if (IS_ERR(next))
8904                         return PTR_ERR(next);
8905
8906                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8907                                                level - 1);
8908                 reada = 1;
8909         }
8910         btrfs_tree_lock(next);
8911         btrfs_set_lock_blocking(next);
8912
8913         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8914                                        &wc->refs[level - 1],
8915                                        &wc->flags[level - 1]);
8916         if (ret < 0)
8917                 goto out_unlock;
8918
8919         if (unlikely(wc->refs[level - 1] == 0)) {
8920                 btrfs_err(root->fs_info, "Missing references.");
8921                 ret = -EIO;
8922                 goto out_unlock;
8923         }
8924         *lookup_info = 0;
8925
8926         if (wc->stage == DROP_REFERENCE) {
8927                 if (wc->refs[level - 1] > 1) {
8928                         need_account = true;
8929                         if (level == 1 &&
8930                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8931                                 goto skip;
8932
8933                         if (!wc->update_ref ||
8934                             generation <= root->root_key.offset)
8935                                 goto skip;
8936
8937                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8938                                               path->slots[level]);
8939                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8940                         if (ret < 0)
8941                                 goto skip;
8942
8943                         wc->stage = UPDATE_BACKREF;
8944                         wc->shared_level = level - 1;
8945                 }
8946         } else {
8947                 if (level == 1 &&
8948                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8949                         goto skip;
8950         }
8951
8952         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8953                 btrfs_tree_unlock(next);
8954                 free_extent_buffer(next);
8955                 next = NULL;
8956                 *lookup_info = 1;
8957         }
8958
8959         if (!next) {
8960                 if (reada && level == 1)
8961                         reada_walk_down(trans, root, wc, path);
8962                 next = read_tree_block(root, bytenr, generation);
8963                 if (IS_ERR(next)) {
8964                         return PTR_ERR(next);
8965                 } else if (!extent_buffer_uptodate(next)) {
8966                         free_extent_buffer(next);
8967                         return -EIO;
8968                 }
8969                 btrfs_tree_lock(next);
8970                 btrfs_set_lock_blocking(next);
8971         }
8972
8973         level--;
8974         ASSERT(level == btrfs_header_level(next));
8975         if (level != btrfs_header_level(next)) {
8976                 btrfs_err(root->fs_info, "mismatched level");
8977                 ret = -EIO;
8978                 goto out_unlock;
8979         }
8980         path->nodes[level] = next;
8981         path->slots[level] = 0;
8982         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8983         wc->level = level;
8984         if (wc->level == 1)
8985                 wc->reada_slot = 0;
8986         return 0;
8987 skip:
8988         wc->refs[level - 1] = 0;
8989         wc->flags[level - 1] = 0;
8990         if (wc->stage == DROP_REFERENCE) {
8991                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8992                         parent = path->nodes[level]->start;
8993                 } else {
8994                         ASSERT(root->root_key.objectid ==
8995                                btrfs_header_owner(path->nodes[level]));
8996                         if (root->root_key.objectid !=
8997                             btrfs_header_owner(path->nodes[level])) {
8998                                 btrfs_err(root->fs_info,
8999                                                 "mismatched block owner");
9000                                 ret = -EIO;
9001                                 goto out_unlock;
9002                         }
9003                         parent = 0;
9004                 }
9005
9006                 if (need_account) {
9007                         ret = account_shared_subtree(trans, root, next,
9008                                                      generation, level - 1);
9009                         if (ret) {
9010                                 btrfs_err_rl(root->fs_info,
9011                                              "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
9012                                              ret);
9013                         }
9014                 }
9015                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
9016                                 root->root_key.objectid, level - 1, 0);
9017                 if (ret)
9018                         goto out_unlock;
9019         }
9020
9021         *lookup_info = 1;
9022         ret = 1;
9023
9024 out_unlock:
9025         btrfs_tree_unlock(next);
9026         free_extent_buffer(next);
9027
9028         return ret;
9029 }
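
/*
 * Editor's sketch (hypothetical helper name, simplified): the core of
 * the stage decision in do_walk_down() above.  A shared child
 * (refs > 1) created after the last snapshot (generation beyond
 * root_key.offset) and not yet behind update_progress flips the walk
 * into UPDATE_BACKREF.  The level-1 full-backref shortcut is omitted
 * here for brevity.
 */
static inline bool should_update_backref(struct walk_control *wc, u64 refs,
                                         u64 generation, u64 snap_gen,
                                         int key_cmp)
{
        return wc->stage == DROP_REFERENCE && refs > 1 &&
               wc->update_ref && generation > snap_gen && key_cmp >= 0;
}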
9030
9031 /*
9032  * helper to process tree block while walking up the tree.
9033  *
9034  * when wc->stage == DROP_REFERENCE, this function drops
9035  * reference count on the block.
9036  *
9037  * when wc->stage == UPDATE_BACKREF, this function changes
9038  * wc->stage back to DROP_REFERENCE if we changed wc->stage
9039  * to UPDATE_BACKREF previously while processing the block.
9040  *
9041  * NOTE: return value 1 means we should stop walking up.
9042  */
9043 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
9044                                  struct btrfs_root *root,
9045                                  struct btrfs_path *path,
9046                                  struct walk_control *wc)
9047 {
9048         int ret;
9049         int level = wc->level;
9050         struct extent_buffer *eb = path->nodes[level];
9051         u64 parent = 0;
9052
9053         if (wc->stage == UPDATE_BACKREF) {
9054                 BUG_ON(wc->shared_level < level);
9055                 if (level < wc->shared_level)
9056                         goto out;
9057
9058                 ret = find_next_key(path, level + 1, &wc->update_progress);
9059                 if (ret > 0)
9060                         wc->update_ref = 0;
9061
9062                 wc->stage = DROP_REFERENCE;
9063                 wc->shared_level = -1;
9064                 path->slots[level] = 0;
9065
9066                 /*
9067                  * check reference count again if the block isn't locked.
9068                  * we should start walking down the tree again if the
9069                  * reference count is one.
9070                  */
9071                 if (!path->locks[level]) {
9072                         BUG_ON(level == 0);
9073                         btrfs_tree_lock(eb);
9074                         btrfs_set_lock_blocking(eb);
9075                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9076
9077                         ret = btrfs_lookup_extent_info(trans, root,
9078                                                        eb->start, level, 1,
9079                                                        &wc->refs[level],
9080                                                        &wc->flags[level]);
9081                         if (ret < 0) {
9082                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
9083                                 path->locks[level] = 0;
9084                                 return ret;
9085                         }
9086                         BUG_ON(wc->refs[level] == 0);
9087                         if (wc->refs[level] == 1) {
9088                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
9089                                 path->locks[level] = 0;
9090                                 return 1;
9091                         }
9092                 }
9093         }
9094
9095         /* wc->stage == DROP_REFERENCE */
9096         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
9097
9098         if (wc->refs[level] == 1) {
9099                 if (level == 0) {
9100                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9101                                 ret = btrfs_dec_ref(trans, root, eb, 1);
9102                         else
9103                                 ret = btrfs_dec_ref(trans, root, eb, 0);
9104                         BUG_ON(ret); /* -ENOMEM */
9105                         ret = account_leaf_items(trans, root, eb);
9106                         if (ret) {
9107                                 btrfs_err_rl(root->fs_info,
9108                                              "error %d accounting leaf items. Quota is out of sync, rescan required.",
9109                                              ret);
9110                         }
9111                 }
9112                 /* make block locked assertion in clean_tree_block happy */
9113                 if (!path->locks[level] &&
9114                     btrfs_header_generation(eb) == trans->transid) {
9115                         btrfs_tree_lock(eb);
9116                         btrfs_set_lock_blocking(eb);
9117                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9118                 }
9119                 clean_tree_block(trans, root->fs_info, eb);
9120         }
9121
9122         if (eb == root->node) {
9123                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9124                         parent = eb->start;
9125                 else if (root->root_key.objectid != btrfs_header_owner(eb))
9126                         goto owner_mismatch;
9127         } else {
9128                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9129                         parent = path->nodes[level + 1]->start;
9130                 else if (root->root_key.objectid !=
9131                          btrfs_header_owner(path->nodes[level + 1]))
9132                         goto owner_mismatch;
9133         }
9134
9135         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
9136 out:
9137         wc->refs[level] = 0;
9138         wc->flags[level] = 0;
9139         return 0;
9140
9141 owner_mismatch:
9142         btrfs_err_rl(root->fs_info, "unexpected tree owner, have %llu expect %llu",
9143                      btrfs_header_owner(eb), root->root_key.objectid);
9144         return -EUCLEAN;
9145 }
9146
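/*
 * Descend the tree.  walk_down_proc() decides whether the block at the
 * current level still needs work (a positive return ends the descent),
 * and do_walk_down() steps into one child; a positive return from it
 * means "skip this child and advance to the next slot".
 */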
9147 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
9148                                    struct btrfs_root *root,
9149                                    struct btrfs_path *path,
9150                                    struct walk_control *wc)
9151 {
9152         int level = wc->level;
9153         int lookup_info = 1;
9154         int ret;
9155
9156         while (level >= 0) {
9157                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
9158                 if (ret > 0)
9159                         break;
9160
9161                 if (level == 0)
9162                         break;
9163
9164                 if (path->slots[level] >=
9165                     btrfs_header_nritems(path->nodes[level]))
9166                         break;
9167
9168                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
9169                 if (ret > 0) {
9170                         path->slots[level]++;
9171                         continue;
9172                 } else if (ret < 0)
9173                         return ret;
9174                 level = wc->level;
9175         }
9176         return 0;
9177 }
9178
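/*
 * Climb back up the tree.  Returns 0 while there is still work to do
 * below max_level, 1 once the walk has climbed past max_level (the
 * subtree is fully processed), and a negative errno on failure.
 */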
9179 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
9180                                  struct btrfs_root *root,
9181                                  struct btrfs_path *path,
9182                                  struct walk_control *wc, int max_level)
9183 {
9184         int level = wc->level;
9185         int ret;
9186
9187         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
9188         while (level < max_level && path->nodes[level]) {
9189                 wc->level = level;
9190                 if (path->slots[level] + 1 <
9191                     btrfs_header_nritems(path->nodes[level])) {
9192                         path->slots[level]++;
9193                         return 0;
9194                 } else {
9195                         ret = walk_up_proc(trans, root, path, wc);
9196                         if (ret > 0)
9197                                 return 0;
9198                         if (ret < 0)
9199                                 return ret;
9200
9201                         if (path->locks[level]) {
9202                                 btrfs_tree_unlock_rw(path->nodes[level],
9203                                                      path->locks[level]);
9204                                 path->locks[level] = 0;
9205                         }
9206                         free_extent_buffer(path->nodes[level]);
9207                         path->nodes[level] = NULL;
9208                         level++;
9209                 }
9210         }
9211         return 1;
9212 }
9213
9214 /*
9215  * drop a subvolume tree.
9216  *
9217  * this function traverses the tree freeing any blocks that are only
9218  * referenced by the tree.
9219  *
9220  * when a shared tree block is found, this function decreases its
9221  * reference count by one. if update_ref is true, this function
9222  * also makes sure backrefs for the shared block and all lower level
9223  * blocks are properly updated.
9224  *
9225  * If called with for_reloc == 0, may exit early with -EAGAIN
9226  */
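/*
 * Progress is checkpointed into root_item->drop_progress and
 * root_item->drop_level as the walk advances, and persisted whenever the
 * transaction is ended below, so an interrupted drop resumes from that
 * key on the next attempt instead of restarting from the root node.
 */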
9227 int btrfs_drop_snapshot(struct btrfs_root *root,
9228                          struct btrfs_block_rsv *block_rsv, int update_ref,
9229                          int for_reloc)
9230 {
9231         struct btrfs_fs_info *fs_info = root->fs_info;
9232         struct btrfs_path *path;
9233         struct btrfs_trans_handle *trans;
9234         struct btrfs_root *tree_root = fs_info->tree_root;
9235         struct btrfs_root_item *root_item = &root->root_item;
9236         struct walk_control *wc;
9237         struct btrfs_key key;
9238         int err = 0;
9239         int ret;
9240         int level;
9241         bool root_dropped = false;
9242
9243         btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
9244
9245         path = btrfs_alloc_path();
9246         if (!path) {
9247                 err = -ENOMEM;
9248                 goto out;
9249         }
9250
9251         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9252         if (!wc) {
9253                 btrfs_free_path(path);
9254                 err = -ENOMEM;
9255                 goto out;
9256         }
9257
9258         trans = btrfs_start_transaction(tree_root, 0);
9259         if (IS_ERR(trans)) {
9260                 err = PTR_ERR(trans);
9261                 goto out_free;
9262         }
9263
9264         if (block_rsv)
9265                 trans->block_rsv = block_rsv;
9266
9267         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
9268                 level = btrfs_header_level(root->node);
9269                 path->nodes[level] = btrfs_lock_root_node(root);
9270                 btrfs_set_lock_blocking(path->nodes[level]);
9271                 path->slots[level] = 0;
9272                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9273                 memset(&wc->update_progress, 0,
9274                        sizeof(wc->update_progress));
9275         } else {
9276                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
9277                 memcpy(&wc->update_progress, &key,
9278                        sizeof(wc->update_progress));
9279
9280                 level = root_item->drop_level;
9281                 BUG_ON(level == 0);
9282                 path->lowest_level = level;
9283                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9284                 path->lowest_level = 0;
9285                 if (ret < 0) {
9286                         err = ret;
9287                         goto out_end_trans;
9288                 }
9289                 WARN_ON(ret > 0);
9290
9291                 /*
9292                  * unlock our path; this is safe because only this
9293                  * function is allowed to delete this snapshot
9294                  */
9295                 btrfs_unlock_up_safe(path, 0);
9296
9297                 level = btrfs_header_level(root->node);
9298                 while (1) {
9299                         btrfs_tree_lock(path->nodes[level]);
9300                         btrfs_set_lock_blocking(path->nodes[level]);
9301                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9302
9303                         ret = btrfs_lookup_extent_info(trans, root,
9304                                                 path->nodes[level]->start,
9305                                                 level, 1, &wc->refs[level],
9306                                                 &wc->flags[level]);
9307                         if (ret < 0) {
9308                                 err = ret;
9309                                 goto out_end_trans;
9310                         }
9311                         BUG_ON(wc->refs[level] == 0);
9312
9313                         if (level == root_item->drop_level)
9314                                 break;
9315
9316                         btrfs_tree_unlock(path->nodes[level]);
9317                         path->locks[level] = 0;
9318                         WARN_ON(wc->refs[level] != 1);
9319                         level--;
9320                 }
9321         }
9322
9323         wc->level = level;
9324         wc->shared_level = -1;
9325         wc->stage = DROP_REFERENCE;
9326         wc->update_ref = update_ref;
9327         wc->keep_locks = 0;
9328         wc->for_reloc = for_reloc;
9329         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9330
9331         while (1) {
9333                 ret = walk_down_tree(trans, root, path, wc);
9334                 if (ret < 0) {
9335                         err = ret;
9336                         break;
9337                 }
9338
9339                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
9340                 if (ret < 0) {
9341                         err = ret;
9342                         break;
9343                 }
9344
9345                 if (ret > 0) {
9346                         BUG_ON(wc->stage != DROP_REFERENCE);
9347                         break;
9348                 }
9349
9350                 if (wc->stage == DROP_REFERENCE) {
9351                         level = wc->level;
9352                         btrfs_node_key(path->nodes[level],
9353                                        &root_item->drop_progress,
9354                                        path->slots[level]);
9355                         root_item->drop_level = level;
9356                 }
9357
9358                 BUG_ON(wc->level == 0);
9359                 if (btrfs_should_end_transaction(trans, tree_root) ||
9360                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
9361                         ret = btrfs_update_root(trans, tree_root,
9362                                                 &root->root_key,
9363                                                 root_item);
9364                         if (ret) {
9365                                 btrfs_abort_transaction(trans, ret);
9366                                 err = ret;
9367                                 goto out_end_trans;
9368                         }
9369
9370                         btrfs_end_transaction_throttle(trans, tree_root);
9371                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
9372                                 btrfs_debug(fs_info,
9373                                             "drop snapshot early exit");
9374                                 err = -EAGAIN;
9375                                 goto out_free;
9376                         }
9377
9378                         trans = btrfs_start_transaction(tree_root, 0);
9379                         if (IS_ERR(trans)) {
9380                                 err = PTR_ERR(trans);
9381                                 goto out_free;
9382                         }
9383                         if (block_rsv)
9384                                 trans->block_rsv = block_rsv;
9385                 }
9386         }
9387         btrfs_release_path(path);
9388         if (err)
9389                 goto out_end_trans;
9390
9391         ret = btrfs_del_root(trans, tree_root, &root->root_key);
9392         if (ret) {
9393                 btrfs_abort_transaction(trans, ret);
9394                 err = ret;
9395                 goto out_end_trans;
9396         }
9397
9398         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9399                 ret = btrfs_find_root(tree_root, &root->root_key, path,
9400                                       NULL, NULL);
9401                 if (ret < 0) {
9402                         btrfs_abort_transaction(trans, ret);
9403                         err = ret;
9404                         goto out_end_trans;
9405                 } else if (ret > 0) {
9406                         /* if we fail to delete the orphan item this time
9407                          * around, it'll get picked up the next time.
9408                          *
9409                          * The most common failure here is just -ENOENT.
9410                          */
9411                         btrfs_del_orphan_item(trans, tree_root,
9412                                               root->root_key.objectid);
9413                 }
9414         }
9415
9416         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9417                 btrfs_add_dropped_root(trans, root);
9418         } else {
9419                 free_extent_buffer(root->node);
9420                 free_extent_buffer(root->commit_root);
9421                 btrfs_put_fs_root(root);
9422         }
9423         root_dropped = true;
9424 out_end_trans:
9425         btrfs_end_transaction_throttle(trans, tree_root);
9426 out_free:
9427         kfree(wc);
9428         btrfs_free_path(path);
9429 out:
9430         /*
9431          * So if we need to stop dropping the snapshot for whatever reason we
9432          * need to make sure to add it back to the dead root list so that we
9433          * keep trying to do the work later.  This also cleans up roots that
9434          * aren't in the radix (like when we recover after a power failure or
9435          * unmount) so we don't leak memory.
9436          */
9437         if (!for_reloc && root_dropped == false)
9438                 btrfs_add_dead_root(root);
9439         return err;
9440 }
9441
9442 /*
9443  * drop the subtree rooted at tree block 'node'.
9444  *
9445  * NOTE: this function will unlock and release tree block 'node'.
9446  * Only used by relocation code.
9447  */
9448 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9449                         struct btrfs_root *root,
9450                         struct extent_buffer *node,
9451                         struct extent_buffer *parent)
9452 {
9453         struct btrfs_path *path;
9454         struct walk_control *wc;
9455         int level;
9456         int parent_level;
9457         int ret = 0;
9458         int wret;
9459
9460         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9461
9462         path = btrfs_alloc_path();
9463         if (!path)
9464                 return -ENOMEM;
9465
9466         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9467         if (!wc) {
9468                 btrfs_free_path(path);
9469                 return -ENOMEM;
9470         }
9471
9472         btrfs_assert_tree_locked(parent);
9473         parent_level = btrfs_header_level(parent);
9474         extent_buffer_get(parent);
9475         path->nodes[parent_level] = parent;
9476         path->slots[parent_level] = btrfs_header_nritems(parent);
9477
9478         btrfs_assert_tree_locked(node);
9479         level = btrfs_header_level(node);
9480         path->nodes[level] = node;
9481         path->slots[level] = 0;
9482         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9483
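        /*
         * The relocation tree references its blocks with full backrefs,
         * so seed the parent level as a single full-backref reference;
         * the walk below then drops the subtree rooted at 'node'.
         */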
9484         wc->refs[parent_level] = 1;
9485         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9486         wc->level = level;
9487         wc->shared_level = -1;
9488         wc->stage = DROP_REFERENCE;
9489         wc->update_ref = 0;
9490         wc->keep_locks = 1;
9491         wc->for_reloc = 1;
9492         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9493
9494         while (1) {
9495                 wret = walk_down_tree(trans, root, path, wc);
9496                 if (wret < 0) {
9497                         ret = wret;
9498                         break;
9499                 }
9500
9501                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9502                 if (wret < 0)
9503                         ret = wret;
9504                 if (wret != 0)
9505                         break;
9506         }
9507
9508         kfree(wc);
9509         btrfs_free_path(path);
9510         return ret;
9511 }
9512
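/*
 * Reduce the redundancy profile in 'flags' to what the current number of
 * rw devices can support.  For example, with no restripe target set:
 *
 *   1 rw device:   RAID1/RAID10 -> DUP, RAID0 -> single
 *   2+ rw devices: DUP -> RAID1; existing RAID profiles are kept as is
 */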
9513 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9514 {
9515         u64 num_devices;
9516         u64 stripped;
9517
9518         /*
9519          * if restripe for this chunk type is on, pick the target profile
9520          * and return; otherwise do the usual balance
9521          */
9522         stripped = get_restripe_target(root->fs_info, flags);
9523         if (stripped)
9524                 return extended_to_chunk(stripped);
9525
9526         num_devices = root->fs_info->fs_devices->rw_devices;
9527
9528         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9529                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9530                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9531
9532         if (num_devices == 1) {
9533                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9534                 stripped = flags & ~stripped;
9535
9536                 /* turn raid0 into single device chunks */
9537                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9538                         return stripped;
9539
9540                 /* turn mirroring into duplication */
9541                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9542                              BTRFS_BLOCK_GROUP_RAID10))
9543                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9544         } else {
9545                 /* they already had raid on here, just return */
9546                 if (flags & stripped)
9547                         return flags;
9548
9549                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9550                 stripped = flags & ~stripped;
9551
9552                 /* switch duplicated blocks with raid1 */
9553                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9554                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9555
9556                 /* this is drive concat, leave it alone */
9557         }
9558
9559         return flags;
9560 }
9561
9562 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9563 {
9564         struct btrfs_space_info *sinfo = cache->space_info;
9565         u64 num_bytes;
9566         u64 min_allocable_bytes;
9567         int ret = -ENOSPC;
9568
9569         /*
9570          * We need some metadata and system chunk space for allocating
9571          * chunks in some corner cases, so keep a minimum of allocatable
9572          * space unless the caller forces the group read-only.
9573          */
9574         if ((sinfo->flags &
9575              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9576             !force)
9577                 min_allocable_bytes = SZ_1M;
9578         else
9579                 min_allocable_bytes = 0;
9580
9581         spin_lock(&sinfo->lock);
9582         spin_lock(&cache->lock);
9583
9584         if (cache->ro) {
9585                 cache->ro++;
9586                 ret = 0;
9587                 goto out;
9588         }
9589
9590         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9591                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9592
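        /*
         * num_bytes is the still-unused part of the group, e.g. ~424MiB
         * for a 1GiB block group with 600MiB used and nothing reserved or
         * pinned (ignoring bytes_super).  That space becomes unallocatable
         * once the group is read-only, so only proceed if the space_info
         * can spare it plus min_allocable_bytes.
         */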
9593         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9594             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9595             min_allocable_bytes <= sinfo->total_bytes) {
9596                 sinfo->bytes_readonly += num_bytes;
9597                 cache->ro++;
9598                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9599                 ret = 0;
9600         }
9601 out:
9602         spin_unlock(&cache->lock);
9603         spin_unlock(&sinfo->lock);
9604         return ret;
9605 }
9606
9607 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9608                              struct btrfs_block_group_cache *cache)
9610 {
9611         struct btrfs_trans_handle *trans;
9612         u64 alloc_flags;
9613         int ret;
9614
9615 again:
9616         trans = btrfs_join_transaction(root);
9617         if (IS_ERR(trans))
9618                 return PTR_ERR(trans);
9619
9620         /*
9621          * we're not allowed to set block groups readonly after the dirty
9622          * block groups cache has started writing.  If it already started,
9623          * back off and let this transaction commit
9624          */
9625         mutex_lock(&root->fs_info->ro_block_group_mutex);
9626         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9627                 u64 transid = trans->transid;
9628
9629                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9630                 btrfs_end_transaction(trans, root);
9631
9632                 ret = btrfs_wait_for_commit(root, transid);
9633                 if (ret)
9634                         return ret;
9635                 goto again;
9636         }
9637
9638         /*
9639          * if we are changing raid levels, try to allocate a corresponding
9640          * block group with the new raid level.
9641          */
9642         alloc_flags = update_block_group_flags(root, cache->flags);
9643         if (alloc_flags != cache->flags) {
9644                 ret = do_chunk_alloc(trans, root, alloc_flags,
9645                                      CHUNK_ALLOC_FORCE);
9646                 /*
9647                  * ENOSPC is allowed here, we may have enough space
9648                  * already allocated at the new raid level to
9649                  * carry on
9650                  */
9651                 if (ret == -ENOSPC)
9652                         ret = 0;
9653                 if (ret < 0)
9654                         goto out;
9655         }
9656
9657         ret = inc_block_group_ro(cache, 0);
9658         if (!ret)
9659                 goto out;
9660         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9661         ret = do_chunk_alloc(trans, root, alloc_flags,
9662                              CHUNK_ALLOC_FORCE);
9663         if (ret < 0)
9664                 goto out;
9665         ret = inc_block_group_ro(cache, 0);
9666 out:
9667         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9668                 alloc_flags = update_block_group_flags(root, cache->flags);
9669                 lock_chunks(root->fs_info->chunk_root);
9670                 check_system_chunk(trans, root, alloc_flags);
9671                 unlock_chunks(root->fs_info->chunk_root);
9672         }
9673         mutex_unlock(&root->fs_info->ro_block_group_mutex);
9674
9675         btrfs_end_transaction(trans, root);
9676         return ret;
9677 }
9678
9679 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9680                             struct btrfs_root *root, u64 type)
9681 {
9682         u64 alloc_flags = get_alloc_profile(root, type);
9683         return do_chunk_alloc(trans, root, alloc_flags,
9684                               CHUNK_ALLOC_FORCE);
9685 }
9686
9687 /*
9688  * helper to account the unused space of all the readonly block groups in
9689  * the space_info. takes mirrors into account.
9690  */
9691 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9692 {
9693         struct btrfs_block_group_cache *block_group;
9694         u64 free_bytes = 0;
9695         int factor;
9696
9697         /* It's df, we don't care if it's racy */
9698         if (list_empty(&sinfo->ro_bgs))
9699                 return 0;
9700
9701         spin_lock(&sinfo->lock);
9702         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9703                 spin_lock(&block_group->lock);
9704
9705                 if (!block_group->ro) {
9706                         spin_unlock(&block_group->lock);
9707                         continue;
9708                 }
9709
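                /*
                 * RAID1/RAID10/DUP keep two copies of every byte, so each
                 * unused byte in the group accounts for two bytes of raw
                 * device space, hence factor = 2.
                 */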
9710                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9711                                           BTRFS_BLOCK_GROUP_RAID10 |
9712                                           BTRFS_BLOCK_GROUP_DUP))
9713                         factor = 2;
9714                 else
9715                         factor = 1;
9716
9717                 free_bytes += (block_group->key.offset -
9718                                btrfs_block_group_used(&block_group->item)) *
9719                                factor;
9720
9721                 spin_unlock(&block_group->lock);
9722         }
9723         spin_unlock(&sinfo->lock);
9724
9725         return free_bytes;
9726 }
9727
9728 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9729                               struct btrfs_block_group_cache *cache)
9730 {
9731         struct btrfs_space_info *sinfo = cache->space_info;
9732         u64 num_bytes;
9733
9734         BUG_ON(!cache->ro);
9735
9736         spin_lock(&sinfo->lock);
9737         spin_lock(&cache->lock);
9738         if (!--cache->ro) {
9739                 num_bytes = cache->key.offset - cache->reserved -
9740                             cache->pinned - cache->bytes_super -
9741                             btrfs_block_group_used(&cache->item);
9742                 sinfo->bytes_readonly -= num_bytes;
9743                 list_del_init(&cache->ro_list);
9744         }
9745         spin_unlock(&cache->lock);
9746         spin_unlock(&sinfo->lock);
9747 }
9748
9749 /*
9750  * checks to see if it's even possible to relocate this block group.
9751  *
9752  * @return - -1 if it's not a good idea to relocate this block group, 0 if
9753  * it's ok to go ahead and try.
9754  */
9755 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9756 {
9757         struct btrfs_block_group_cache *block_group;
9758         struct btrfs_space_info *space_info;
9759         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9760         struct btrfs_device *device;
9761         struct btrfs_trans_handle *trans;
9762         u64 min_free;
9763         u64 dev_min = 1;
9764         u64 dev_nr = 0;
9765         u64 target;
9766         int debug;
9767         int index;
9768         int full = 0;
9769         int ret = 0;
9770
9771         debug = btrfs_test_opt(root->fs_info, ENOSPC_DEBUG);
9772
9773         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9774
9775         /* odd, couldn't find the block group, leave it alone */
9776         if (!block_group) {
9777                 if (debug)
9778                         btrfs_warn(root->fs_info,
9779                                    "can't find block group for bytenr %llu",
9780                                    bytenr);
9781                 return -1;
9782         }
9783
9784         min_free = btrfs_block_group_used(&block_group->item);
9785
9786         /* no bytes used, we're good */
9787         if (!min_free)
9788                 goto out;
9789
9790         space_info = block_group->space_info;
9791         spin_lock(&space_info->lock);
9792
9793         full = space_info->full;
9794
9795         /*
9796          * if this is the last block group we have in this space, we can't
9797          * relocate it unless we're able to allocate a new chunk below.
9798          *
9799          * Otherwise, we need to make sure we have room in the space to handle
9800          * all of the extents from this block group.  If we can, we're good
9801          */
9802         if ((space_info->total_bytes != block_group->key.offset) &&
9803             (space_info->bytes_used + space_info->bytes_reserved +
9804              space_info->bytes_pinned + space_info->bytes_readonly +
9805              min_free < space_info->total_bytes)) {
9806                 spin_unlock(&space_info->lock);
9807                 goto out;
9808         }
9809         spin_unlock(&space_info->lock);
9810
9811         /*
9812          * ok we don't have enough space, but maybe we have free space on our
9813          * devices to allocate new chunks for relocation, so loop through our
9814          * alloc devices and guess if we have enough space.  if this block
9815          * group is going to be restriped, run checks against the target
9816          * profile instead of the current one.
9817          */
9818         ret = -1;
9819
9820         /*
9821          * index:
9822          *      0: raid10
9823          *      1: raid1
9824          *      2: dup
9825          *      3: raid0
9826          *      4: single
9827          */
9828         target = get_restripe_target(root->fs_info, block_group->flags);
9829         if (target) {
9830                 index = __get_raid_index(extended_to_chunk(target));
9831         } else {
9832                 /*
9833                  * this is just a balance, so if we were marked as full
9834                  * we know there is no space for a new chunk
9835                  */
9836                 if (full) {
9837                         if (debug)
9838                                 btrfs_warn(root->fs_info,
9839                                         "no space to alloc new chunk for block group %llu",
9840                                         block_group->key.objectid);
9841                         goto out;
9842                 }
9843
9844                 index = get_block_group_index(block_group);
9845         }
9846
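        /*
         * Normalize min_free to a per-device amount, e.g. relocating 1GiB
         * of used RAID10 data needs ~512MiB free on each of at least 4
         * devices (two copies striped over four disks); DUP doubles
         * min_free on one device and RAID0 spreads it across all rw
         * devices.
         */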
9847         if (index == BTRFS_RAID_RAID10) {
9848                 dev_min = 4;
9849                 /* Divide by 2 */
9850                 min_free >>= 1;
9851         } else if (index == BTRFS_RAID_RAID1) {
9852                 dev_min = 2;
9853         } else if (index == BTRFS_RAID_DUP) {
9854                 /* Multiply by 2 */
9855                 min_free <<= 1;
9856         } else if (index == BTRFS_RAID_RAID0) {
9857                 dev_min = fs_devices->rw_devices;
9858                 min_free = div64_u64(min_free, dev_min);
9859         }
9860
9861         /* We need to do this so that we can look at pending chunks */
9862         trans = btrfs_join_transaction(root);
9863         if (IS_ERR(trans)) {
9864                 ret = PTR_ERR(trans);
9865                 goto out;
9866         }
9867
9868         mutex_lock(&root->fs_info->chunk_mutex);
9869         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9870                 u64 dev_offset;
9871
9872                 /*
9873                  * check to make sure we can actually find a chunk with enough
9874                  * space to fit our block group in.
9875                  */
9876                 if (device->total_bytes > device->bytes_used + min_free &&
9877                     !device->is_tgtdev_for_dev_replace) {
9878                         ret = find_free_dev_extent(trans, device, min_free,
9879                                                    &dev_offset, NULL);
9880                         if (!ret)
9881                                 dev_nr++;
9882
9883                         if (dev_nr >= dev_min)
9884                                 break;
9885
9886                         ret = -1;
9887                 }
9888         }
9889         if (debug && ret == -1)
9890                 btrfs_warn(root->fs_info,
9891                         "no space to allocate a new chunk for block group %llu",
9892                         block_group->key.objectid);
9893         mutex_unlock(&root->fs_info->chunk_mutex);
9894         btrfs_end_transaction(trans, root);
9895 out:
9896         btrfs_put_block_group(block_group);
9897         return ret;
9898 }
9899
9900 static int find_first_block_group(struct btrfs_root *root,
9901                 struct btrfs_path *path, struct btrfs_key *key)
9902 {
9903         int ret = 0;
9904         struct btrfs_key found_key;
9905         struct extent_buffer *leaf;
9906         struct btrfs_block_group_item bg;
9907         u64 flags;
9908         int slot;
9909
9910         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9911         if (ret < 0)
9912                 goto out;
9913
9914         while (1) {
9915                 slot = path->slots[0];
9916                 leaf = path->nodes[0];
9917                 if (slot >= btrfs_header_nritems(leaf)) {
9918                         ret = btrfs_next_leaf(root, path);
9919                         if (ret == 0)
9920                                 continue;
9921                         if (ret < 0)
9922                                 goto out;
9923                         break;
9924                 }
9925                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9926
9927                 if (found_key.objectid >= key->objectid &&
9928                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
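                        /*
                         * Cross-check the block group item against the
                         * chunk mapping: a missing chunk yields -ENOENT,
                         * while a start/length/type mismatch means the
                         * metadata is corrupted and yields -EUCLEAN.
                         */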
9929                         struct extent_map_tree *em_tree;
9930                         struct extent_map *em;
9931
9932                         em_tree = &root->fs_info->mapping_tree.map_tree;
9933                         read_lock(&em_tree->lock);
9934                         em = lookup_extent_mapping(em_tree, found_key.objectid,
9935                                                    found_key.offset);
9936                         read_unlock(&em_tree->lock);
9937                         if (!em) {
9938                                 btrfs_err(root->fs_info,
9939                         "logical %llu len %llu found bg but no related chunk",
9940                                           found_key.objectid, found_key.offset);
9941                                 ret = -ENOENT;
9942                         } else if (em->start != found_key.objectid ||
9943                                    em->len != found_key.offset) {
9944                                 btrfs_err(root->fs_info,
9945                 "block group %llu len %llu mismatch with chunk %llu len %llu",
9946                                           found_key.objectid, found_key.offset,
9947                                           em->start, em->len);
9948                                 ret = -EUCLEAN;
9949                         } else {
9950                                 read_extent_buffer(leaf, &bg,
9951                                         btrfs_item_ptr_offset(leaf, slot),
9952                                         sizeof(bg));
9953                                 flags = btrfs_block_group_flags(&bg) &
9954                                         BTRFS_BLOCK_GROUP_TYPE_MASK;
9955
9956                                 if (flags != (em->map_lookup->type &
9957                                               BTRFS_BLOCK_GROUP_TYPE_MASK)) {
9958                                         btrfs_err(root->fs_info,
9959 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
9960                                                 found_key.objectid,
9961                                                 found_key.offset, flags,
9962                                                 (BTRFS_BLOCK_GROUP_TYPE_MASK &
9963                                                  em->map_lookup->type));
9964                                         ret = -EUCLEAN;
9965                                 } else {
9966                                         ret = 0;
9967                                 }
9968                         }
9969                         free_extent_map(em);
9970                         goto out;
9971                 }
9972                 path->slots[0]++;
9973         }
9974 out:
9975         return ret;
9976 }
9977
9978 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9979 {
9980         struct btrfs_block_group_cache *block_group;
9981         u64 last = 0;
9982
9983         while (1) {
9984                 struct inode *inode;
9985
9986                 block_group = btrfs_lookup_first_block_group(info, last);
9987                 while (block_group) {
9988                         wait_block_group_cache_done(block_group);
9989                         spin_lock(&block_group->lock);
9990                         if (block_group->iref)
9991                                 break;
9992                         spin_unlock(&block_group->lock);
9993                         block_group = next_block_group(info->tree_root,
9994                                                        block_group);
9995                 }
9996                 if (!block_group) {
9997                         if (last == 0)
9998                                 break;
9999                         last = 0;
10000                         continue;
10001                 }
10002
10003                 inode = block_group->inode;
10004                 block_group->iref = 0;
10005                 block_group->inode = NULL;
10006                 spin_unlock(&block_group->lock);
10007                 ASSERT(block_group->io_ctl.inode == NULL);
10008                 iput(inode);
10009                 last = block_group->key.objectid + block_group->key.offset;
10010                 btrfs_put_block_group(block_group);
10011         }
10012 }
10013
10014 int btrfs_free_block_groups(struct btrfs_fs_info *info)
10015 {
10016         struct btrfs_block_group_cache *block_group;
10017         struct btrfs_space_info *space_info;
10018         struct btrfs_caching_control *caching_ctl;
10019         struct rb_node *n;
10020
10021         down_write(&info->commit_root_sem);
10022         while (!list_empty(&info->caching_block_groups)) {
10023                 caching_ctl = list_entry(info->caching_block_groups.next,
10024                                          struct btrfs_caching_control, list);
10025                 list_del(&caching_ctl->list);
10026                 put_caching_control(caching_ctl);
10027         }
10028         up_write(&info->commit_root_sem);
10029
10030         spin_lock(&info->unused_bgs_lock);
10031         while (!list_empty(&info->unused_bgs)) {
10032                 block_group = list_first_entry(&info->unused_bgs,
10033                                                struct btrfs_block_group_cache,
10034                                                bg_list);
10035                 list_del_init(&block_group->bg_list);
10036                 btrfs_put_block_group(block_group);
10037         }
10038         spin_unlock(&info->unused_bgs_lock);
10039
10040         spin_lock(&info->block_group_cache_lock);
10041         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
10042                 block_group = rb_entry(n, struct btrfs_block_group_cache,
10043                                        cache_node);
10044                 rb_erase(&block_group->cache_node,
10045                          &info->block_group_cache_tree);
10046                 RB_CLEAR_NODE(&block_group->cache_node);
10047                 spin_unlock(&info->block_group_cache_lock);
10048
10049                 down_write(&block_group->space_info->groups_sem);
10050                 list_del(&block_group->list);
10051                 up_write(&block_group->space_info->groups_sem);
10052
10053                 if (block_group->cached == BTRFS_CACHE_STARTED)
10054                         wait_block_group_cache_done(block_group);
10055
10056                 /*
10057                  * We haven't cached this block group, which means we may
10058                  * have excluded extents on this block group.
10059                  */
10060                 if (block_group->cached == BTRFS_CACHE_NO ||
10061                     block_group->cached == BTRFS_CACHE_ERROR)
10062                         free_excluded_extents(info->extent_root, block_group);
10063
10064                 btrfs_remove_free_space_cache(block_group);
10065                 ASSERT(list_empty(&block_group->dirty_list));
10066                 ASSERT(list_empty(&block_group->io_list));
10067                 ASSERT(list_empty(&block_group->bg_list));
10068                 ASSERT(atomic_read(&block_group->count) == 1);
10069                 btrfs_put_block_group(block_group);
10070
10071                 spin_lock(&info->block_group_cache_lock);
10072         }
10073         spin_unlock(&info->block_group_cache_lock);
10074
10075         /* now that all the block groups are freed, go through and
10076          * free all the space_info structs.  This is only called during
10077          * the final stages of unmount, and so we know nobody is
10078          * using them.  We call synchronize_rcu() once before we start,
10079          * just to be on the safe side.
10080          */
10081         synchronize_rcu();
10082
10083         release_global_block_rsv(info);
10084
10085         while (!list_empty(&info->space_info)) {
10086                 int i;
10087
10088                 space_info = list_entry(info->space_info.next,
10089                                         struct btrfs_space_info,
10090                                         list);
10091
10092                 /*
10093                  * Do not hide this behind enospc_debug; this is actually
10094                  * important and indicates a real bug if this happens.
10095                  */
10096                 if (WARN_ON(space_info->bytes_pinned > 0 ||
10097                             space_info->bytes_reserved > 0 ||
10098                             space_info->bytes_may_use > 0))
10099                         dump_space_info(info, space_info, 0, 0);
10100                 list_del(&space_info->list);
10101                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
10102                         struct kobject *kobj;
10103                         kobj = space_info->block_group_kobjs[i];
10104                         space_info->block_group_kobjs[i] = NULL;
10105                         if (kobj) {
10106                                 kobject_del(kobj);
10107                                 kobject_put(kobj);
10108                         }
10109                 }
10110                 kobject_del(&space_info->kobj);
10111                 kobject_put(&space_info->kobj);
10112         }
10113         return 0;
10114 }
10115
10116 static void __link_block_group(struct btrfs_space_info *space_info,
10117                                struct btrfs_block_group_cache *cache)
10118 {
10119         int index = get_block_group_index(cache);
10120         bool first = false;
10121
10122         down_write(&space_info->groups_sem);
10123         if (list_empty(&space_info->block_groups[index]))
10124                 first = true;
10125         list_add_tail(&cache->list, &space_info->block_groups[index]);
10126         up_write(&space_info->groups_sem);
10127
10128         if (first) {
10129                 struct raid_kobject *rkobj;
10130                 int ret;
10131
10132                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
10133                 if (!rkobj)
10134                         goto out_err;
10135                 rkobj->raid_type = index;
10136                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
10137                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
10138                                   "%s", get_raid_name(index));
10139                 if (ret) {
10140                         kobject_put(&rkobj->kobj);
10141                         goto out_err;
10142                 }
10143                 space_info->block_group_kobjs[index] = &rkobj->kobj;
10144         }
10145
10146         return;
10147 out_err:
10148         btrfs_warn(cache->fs_info,
10149                    "failed to add kobject for block cache, ignoring");
10150 }
10151
10152 static struct btrfs_block_group_cache *
10153 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
10154 {
10155         struct btrfs_block_group_cache *cache;
10156
10157         cache = kzalloc(sizeof(*cache), GFP_NOFS);
10158         if (!cache)
10159                 return NULL;
10160
10161         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
10162                                         GFP_NOFS);
10163         if (!cache->free_space_ctl) {
10164                 kfree(cache);
10165                 return NULL;
10166         }
10167
10168         cache->key.objectid = start;
10169         cache->key.offset = size;
10170         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10171
10172         cache->sectorsize = root->sectorsize;
10173         cache->fs_info = root->fs_info;
10174         cache->full_stripe_len = btrfs_full_stripe_len(root,
10175                                                &root->fs_info->mapping_tree,
10176                                                start);
10177         set_free_space_tree_thresholds(cache);
10178
10179         atomic_set(&cache->count, 1);
10180         spin_lock_init(&cache->lock);
10181         init_rwsem(&cache->data_rwsem);
10182         INIT_LIST_HEAD(&cache->list);
10183         INIT_LIST_HEAD(&cache->cluster_list);
10184         INIT_LIST_HEAD(&cache->bg_list);
10185         INIT_LIST_HEAD(&cache->ro_list);
10186         INIT_LIST_HEAD(&cache->dirty_list);
10187         INIT_LIST_HEAD(&cache->io_list);
10188         btrfs_init_free_space_ctl(cache);
10189         atomic_set(&cache->trimming, 0);
10190         mutex_init(&cache->free_space_lock);
10191
10192         return cache;
10193 }
10194
10195
10196 /*
10197  * Iterate over all chunks and verify that each of them has a corresponding
10198  * block group
10199  */
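/*
 * This runs at mount time from the tail of btrfs_read_block_groups()
 * below, once every block group item has been loaded and linked to its
 * space_info.
 */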
10200 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
10201 {
10202         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
10203         struct extent_map *em;
10204         struct btrfs_block_group_cache *bg;
10205         u64 start = 0;
10206         int ret = 0;
10207
10208         while (1) {
10209                 read_lock(&map_tree->map_tree.lock);
10210                 /*
10211                  * lookup_extent_mapping will return the first extent map
10212                  * intersecting the range, so setting @len to 1 is enough to
10213                  * get the first chunk.
10214                  */
10215                 em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
10216                 read_unlock(&map_tree->map_tree.lock);
10217                 if (!em)
10218                         break;
10219
10220                 bg = btrfs_lookup_block_group(fs_info, em->start);
10221                 if (!bg) {
10222                         btrfs_err(fs_info,
10223         "chunk start=%llu len=%llu doesn't have corresponding block group",
10224                                      em->start, em->len);
10225                         ret = -EUCLEAN;
10226                         free_extent_map(em);
10227                         break;
10228                 }
10229                 if (bg->key.objectid != em->start ||
10230                     bg->key.offset != em->len ||
10231                     (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
10232                     (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
10233                         btrfs_err(fs_info,
10234 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
10235                                 em->start, em->len,
10236                                 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
10237                                 bg->key.objectid, bg->key.offset,
10238                                 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
10239                         ret = -EUCLEAN;
10240                         free_extent_map(em);
10241                         btrfs_put_block_group(bg);
10242                         break;
10243                 }
10244                 start = em->start + em->len;
10245                 free_extent_map(em);
10246                 btrfs_put_block_group(bg);
10247         }
10248         return ret;
10249 }
10250
10251 int btrfs_read_block_groups(struct btrfs_root *root)
10252 {
10253         struct btrfs_path *path;
10254         int ret;
10255         struct btrfs_block_group_cache *cache;
10256         struct btrfs_fs_info *info = root->fs_info;
10257         struct btrfs_space_info *space_info;
10258         struct btrfs_key key;
10259         struct btrfs_key found_key;
10260         struct extent_buffer *leaf;
10261         int need_clear = 0;
10262         u64 cache_gen;
10263         u64 feature;
10264         int mixed;
10265
10266         feature = btrfs_super_incompat_flags(info->super_copy);
10267         mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
10268
10269         root = info->extent_root;
10270         key.objectid = 0;
10271         key.offset = 0;
10272         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10273         path = btrfs_alloc_path();
10274         if (!path)
10275                 return -ENOMEM;
10276         path->reada = READA_FORWARD;
10277
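        /*
         * The super block's cache_generation only matches the current
         * generation when the free space cache was written out cleanly at
         * the last commit; on a mismatch (or with -o clear_cache) treat
         * every on-disk free space cache as stale and rebuild it.
         */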
10278         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
10279         if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
10280             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
10281                 need_clear = 1;
10282         if (btrfs_test_opt(root->fs_info, CLEAR_CACHE))
10283                 need_clear = 1;
10284
10285         while (1) {
10286                 ret = find_first_block_group(root, path, &key);
10287                 if (ret > 0)
10288                         break;
10289                 if (ret != 0)
10290                         goto error;
10291
10292                 leaf = path->nodes[0];
10293                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
10294
10295                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
10296                                                        found_key.offset);
10297                 if (!cache) {
10298                         ret = -ENOMEM;
10299                         goto error;
10300                 }
10301
10302                 if (need_clear) {
10303                         /*
10304                          * When we mount with old space cache, we need to
10305                          * set BTRFS_DC_CLEAR and set dirty flag.
10306                          *
10307                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
10308                          *    truncate the old free space cache inode and
10309                          *    set up a new one.
10310                          * b) Setting the 'dirty flag' makes sure that we flush
10311                          *    the new space cache info onto disk.
10312                          */
10313                         if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
10314                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
10315                 }
10316
10317                 read_extent_buffer(leaf, &cache->item,
10318                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
10319                                    sizeof(cache->item));
10320                 cache->flags = btrfs_block_group_flags(&cache->item);
10321                 if (!mixed &&
10322                     ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
10323                     (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
10324                         btrfs_err(info,
10325 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
10326                                   cache->key.objectid);
10327                         btrfs_put_block_group(cache);
10328                         ret = -EINVAL;
10329                         goto error;
10330                 }
10331
10332                 key.objectid = found_key.objectid + found_key.offset;
10333                 btrfs_release_path(path);
10334
10335                 /*
10336                  * We need to exclude the super stripes now so that the space
10337                  * info has super bytes accounted for; otherwise we'll think
10338                  * we have more space than we actually do.
10339                  */
10340                 ret = exclude_super_stripes(root, cache);
10341                 if (ret) {
10342                         /*
10343                          * We may have excluded something, so call this just in
10344                          * case.
10345                          */
10346                         free_excluded_extents(root, cache);
10347                         btrfs_put_block_group(cache);
10348                         goto error;
10349                 }
10350
10351                 /*
10352                  * check for two cases, either we are full, and therefore
10353                  * don't need to bother with the caching work since we won't
10354                  * find any space, or we are empty, and we can just add all
10355                  * the space in and be done with it.  This saves us a lot of
10356                  * time, particularly in the full case.
10357                  */
10358                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
10359                         cache->last_byte_to_unpin = (u64)-1;
10360                         cache->cached = BTRFS_CACHE_FINISHED;
10361                         free_excluded_extents(root, cache);
10362                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10363                         cache->last_byte_to_unpin = (u64)-1;
10364                         cache->cached = BTRFS_CACHE_FINISHED;
10365                         add_new_free_space(cache, root->fs_info,
10366                                            found_key.objectid,
10367                                            found_key.objectid +
10368                                            found_key.offset);
10369                         free_excluded_extents(root, cache);
10370                 }
10371
10372                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
10373                 if (ret) {
10374                         btrfs_remove_free_space_cache(cache);
10375                         btrfs_put_block_group(cache);
10376                         goto error;
10377                 }
10378
10379                 trace_btrfs_add_block_group(root->fs_info, cache, 0);
10380                 ret = update_space_info(info, cache->flags, found_key.offset,
10381                                         btrfs_block_group_used(&cache->item),
10382                                         cache->bytes_super, &space_info);
10383                 if (ret) {
10384                         btrfs_remove_free_space_cache(cache);
10385                         spin_lock(&info->block_group_cache_lock);
10386                         rb_erase(&cache->cache_node,
10387                                  &info->block_group_cache_tree);
10388                         RB_CLEAR_NODE(&cache->cache_node);
10389                         spin_unlock(&info->block_group_cache_lock);
10390                         btrfs_put_block_group(cache);
10391                         goto error;
10392                 }
10393
10394                 cache->space_info = space_info;
10395
10396                 __link_block_group(space_info, cache);
10397
10398                 set_avail_alloc_bits(root->fs_info, cache->flags);
10399                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
10400                         inc_block_group_ro(cache, 1);
10401                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10402                         spin_lock(&info->unused_bgs_lock);
10403                         /* Should always be true but just in case. */
10404                         if (list_empty(&cache->bg_list)) {
10405                                 btrfs_get_block_group(cache);
10406                                 list_add_tail(&cache->bg_list,
10407                                               &info->unused_bgs);
10408                         }
10409                         spin_unlock(&info->unused_bgs_lock);
10410                 }
10411         }
10412
10413         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
10414                 if (!(get_alloc_profile(root, space_info->flags) &
10415                       (BTRFS_BLOCK_GROUP_RAID10 |
10416                        BTRFS_BLOCK_GROUP_RAID1 |
10417                        BTRFS_BLOCK_GROUP_RAID5 |
10418                        BTRFS_BLOCK_GROUP_RAID6 |
10419                        BTRFS_BLOCK_GROUP_DUP)))
10420                         continue;
10421                 /*
10422                  * avoid allocating from un-mirrored block groups if there are
10423                  * mirrored block groups.
10424                  */
10425                 list_for_each_entry(cache,
10426                                 &space_info->block_groups[BTRFS_RAID_RAID0],
10427                                 list)
10428                         inc_block_group_ro(cache, 1);
10429                 list_for_each_entry(cache,
10430                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
10431                                 list)
10432                         inc_block_group_ro(cache, 1);
10433         }
10434
10435         init_global_block_rsv(info);
10436         ret = check_chunk_block_group_mappings(info);
10437 error:
10438         btrfs_free_path(path);
10439         return ret;
10440 }
10441
10442 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
10443                                        struct btrfs_root *root)
10444 {
10445         struct btrfs_block_group_cache *block_group;
10446         struct btrfs_root *extent_root = root->fs_info->extent_root;
10447         struct btrfs_block_group_item item;
10448         struct btrfs_key key;
10449         int ret = 0;
10450         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
10451
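        /*
         * Inserting the block group items below may itself allocate space
         * and try to flush pending block groups, which would re-enter this
         * function; keep that from happening while we walk trans->new_bgs.
         */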
10452         trans->can_flush_pending_bgs = false;
10453         while (!list_empty(&trans->new_bgs)) {
10454                 block_group = list_first_entry(&trans->new_bgs,
10455                                                struct btrfs_block_group_cache,
10456                                                bg_list);
10457                 if (ret)
10458                         goto next;
10459
10460                 spin_lock(&block_group->lock);
10461                 memcpy(&item, &block_group->item, sizeof(item));
10462                 memcpy(&key, &block_group->key, sizeof(key));
10463                 spin_unlock(&block_group->lock);
10464
10465                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
10466                                         sizeof(item));
10467                 if (ret)
10468                         btrfs_abort_transaction(trans, ret);
10469                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
10470                                                key.objectid, key.offset);
10471                 if (ret)
10472                         btrfs_abort_transaction(trans, ret);
10473                 add_block_group_free_space(trans, root->fs_info, block_group);
10474                 /* already aborted the transaction if it failed. */
10475 next:
10476                 list_del_init(&block_group->bg_list);
10477         }
10478         trans->can_flush_pending_bgs = can_flush_pending_bgs;
10479 }
10480
10481 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
10482                            struct btrfs_root *root, u64 bytes_used,
10483                            u64 type, u64 chunk_objectid, u64 chunk_offset,
10484                            u64 size)
10485 {
10486         int ret;
10487         struct btrfs_root *extent_root;
10488         struct btrfs_block_group_cache *cache;
10489         extent_root = root->fs_info->extent_root;
10490
10491         btrfs_set_log_full_commit(root->fs_info, trans);
10492
10493         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
10494         if (!cache)
10495                 return -ENOMEM;
10496
10497         btrfs_set_block_group_used(&cache->item, bytes_used);
10498         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
10499         btrfs_set_block_group_flags(&cache->item, type);
10500
10501         cache->flags = type;
10502         cache->last_byte_to_unpin = (u64)-1;
10503         cache->cached = BTRFS_CACHE_FINISHED;
10504         cache->needs_free_space = 1;
10505         ret = exclude_super_stripes(root, cache);
10506         if (ret) {
10507                 /*
10508                  * We may have excluded something, so call this just in
10509                  * case.
10510                  */
10511                 free_excluded_extents(root, cache);
10512                 btrfs_put_block_group(cache);
10513                 return ret;
10514         }
10515
10516         add_new_free_space(cache, root->fs_info, chunk_offset,
10517                            chunk_offset + size);
10518
10519         free_excluded_extents(root, cache);
10520
10521 #ifdef CONFIG_BTRFS_DEBUG
10522         if (btrfs_should_fragment_free_space(root, cache)) {
10523                 u64 new_bytes_used = size - bytes_used;
10524
10525                 bytes_used += new_bytes_used >> 1;
10526                 fragment_free_space(root, cache);
10527         }
10528 #endif
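        /*
         * Worked example for the debug block above (illustrative numbers
         * only): for a 1GiB block group created with bytes_used == 0,
         * new_bytes_used is 1GiB and bytes_used becomes 512MiB, i.e. half
         * of the free space is accounted as used so that
         * fragment_free_space() leaves a deliberately sparse free-space
         * layout for testing.
         */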
10529         /*
10530          * Call to ensure the corresponding space_info object is created and
10531          * assigned to our block group, but don't update its counters just yet.
10532          * We want our bg to be added to the rbtree with its ->space_info set.
10533          */
10534         ret = update_space_info(root->fs_info, cache->flags, 0, 0, 0,
10535                                 &cache->space_info);
10536         if (ret) {
10537                 btrfs_remove_free_space_cache(cache);
10538                 btrfs_put_block_group(cache);
10539                 return ret;
10540         }
10541
10542         ret = btrfs_add_block_group_cache(root->fs_info, cache);
10543         if (ret) {
10544                 btrfs_remove_free_space_cache(cache);
10545                 btrfs_put_block_group(cache);
10546                 return ret;
10547         }
10548
10549         /*
10550          * Now that our block group has its ->space_info set and is inserted in
10551          * the rbtree, update the space info's counters.
10552          */
10553         trace_btrfs_add_block_group(root->fs_info, cache, 1);
10554         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
10555                                 cache->bytes_super, &cache->space_info);
10556         if (ret) {
10557                 btrfs_remove_free_space_cache(cache);
10558                 spin_lock(&root->fs_info->block_group_cache_lock);
10559                 rb_erase(&cache->cache_node,
10560                          &root->fs_info->block_group_cache_tree);
10561                 RB_CLEAR_NODE(&cache->cache_node);
10562                 spin_unlock(&root->fs_info->block_group_cache_lock);
10563                 btrfs_put_block_group(cache);
10564                 return ret;
10565         }
10566         update_global_block_rsv(root->fs_info);
10567
10568         __link_block_group(cache->space_info, cache);
10569
10570         list_add_tail(&cache->bg_list, &trans->new_bgs);
10571
10572         set_avail_alloc_bits(extent_root->fs_info, type);
10573         return 0;
10574 }
10575
10576 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10577 {
10578         u64 extra_flags = chunk_to_extended(flags) &
10579                                 BTRFS_EXTENDED_PROFILE_MASK;
10580
10581         write_seqlock(&fs_info->profiles_lock);
10582         if (flags & BTRFS_BLOCK_GROUP_DATA)
10583                 fs_info->avail_data_alloc_bits &= ~extra_flags;
10584         if (flags & BTRFS_BLOCK_GROUP_METADATA)
10585                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10586         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10587                 fs_info->avail_system_alloc_bits &= ~extra_flags;
10588         write_sequnlock(&fs_info->profiles_lock);
10589 }
10590
10591 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10592                              struct btrfs_root *root, u64 group_start,
10593                              struct extent_map *em)
10594 {
10595         struct btrfs_path *path;
10596         struct btrfs_block_group_cache *block_group;
10597         struct btrfs_free_cluster *cluster;
10598         struct btrfs_root *tree_root = root->fs_info->tree_root;
10599         struct btrfs_key key;
10600         struct inode *inode;
10601         struct kobject *kobj = NULL;
10602         int ret;
10603         int index;
10604         int factor;
10605         struct btrfs_caching_control *caching_ctl = NULL;
10606         bool remove_em;
10607
10608         root = root->fs_info->extent_root;
10609
10610         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
10611         BUG_ON(!block_group);
10612         BUG_ON(!block_group->ro);
10613
10614         /*
10615          * Free the reserved super bytes from this block group before
10616          * removing it.
10617          */
10618         free_excluded_extents(root, block_group);
10619
10620         memcpy(&key, &block_group->key, sizeof(key));
10621         index = get_block_group_index(block_group);
10622         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10623                                   BTRFS_BLOCK_GROUP_RAID1 |
10624                                   BTRFS_BLOCK_GROUP_RAID10))
10625                 factor = 2;
10626         else
10627                 factor = 1;
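        /*
         * The same mapping as a standalone sketch (hypothetical helper,
         * not part of this file): DUP, RAID1 and RAID10 store two copies
         * of every byte, so raw disk usage is twice the logical usage and
         * the factor is used below when adjusting space_info->disk_total.
         *
         *      static inline int bg_disk_factor(u64 flags)
         *      {
         *              return (flags & (BTRFS_BLOCK_GROUP_DUP |
         *                               BTRFS_BLOCK_GROUP_RAID1 |
         *                               BTRFS_BLOCK_GROUP_RAID10)) ? 2 : 1;
         *      }
         */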
10628
10629         /* make sure this block group isn't part of an allocation cluster */
10630         cluster = &root->fs_info->data_alloc_cluster;
10631         spin_lock(&cluster->refill_lock);
10632         btrfs_return_cluster_to_free_space(block_group, cluster);
10633         spin_unlock(&cluster->refill_lock);
10634
10635         /*
10636          * make sure this block group isn't part of a metadata
10637          * allocation cluster
10638          */
10639         cluster = &root->fs_info->meta_alloc_cluster;
10640         spin_lock(&cluster->refill_lock);
10641         btrfs_return_cluster_to_free_space(block_group, cluster);
10642         spin_unlock(&cluster->refill_lock);
10643
10644         path = btrfs_alloc_path();
10645         if (!path) {
10646                 ret = -ENOMEM;
10647                 goto out;
10648         }
10649
10650         /*
10651          * get the inode first so any iput calls done for the io_list
10652          * aren't the final iput (no unlinks allowed now)
10653          */
10654         inode = lookup_free_space_inode(tree_root, block_group, path);
10655
10656         mutex_lock(&trans->transaction->cache_write_mutex);
10657         /*
10658          * make sure our free space cache IO is done before removing the
10659          * free space inode
10660          */
10661         spin_lock(&trans->transaction->dirty_bgs_lock);
10662         if (!list_empty(&block_group->io_list)) {
10663                 list_del_init(&block_group->io_list);
10664
10665                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10666
10667                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10668                 btrfs_wait_cache_io(root, trans, block_group,
10669                                     &block_group->io_ctl, path,
10670                                     block_group->key.objectid);
10671                 btrfs_put_block_group(block_group);
10672                 spin_lock(&trans->transaction->dirty_bgs_lock);
10673         }
10674
10675         if (!list_empty(&block_group->dirty_list)) {
10676                 list_del_init(&block_group->dirty_list);
10677                 btrfs_put_block_group(block_group);
10678         }
10679         spin_unlock(&trans->transaction->dirty_bgs_lock);
10680         mutex_unlock(&trans->transaction->cache_write_mutex);
10681
10682         if (!IS_ERR(inode)) {
10683                 ret = btrfs_orphan_add(trans, inode);
10684                 if (ret) {
10685                         btrfs_add_delayed_iput(inode);
10686                         goto out;
10687                 }
10688                 clear_nlink(inode);
10689                 /* One for the block group's ref */
10690                 spin_lock(&block_group->lock);
10691                 if (block_group->iref) {
10692                         block_group->iref = 0;
10693                         block_group->inode = NULL;
10694                         spin_unlock(&block_group->lock);
10695                         iput(inode);
10696                 } else {
10697                         spin_unlock(&block_group->lock);
10698                 }
10699                 /* One for our lookup ref */
10700                 btrfs_add_delayed_iput(inode);
10701         }
10702
10703         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10704         key.offset = block_group->key.objectid;
10705         key.type = 0;
10706
10707         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10708         if (ret < 0)
10709                 goto out;
10710         if (ret > 0)
10711                 btrfs_release_path(path);
10712         if (ret == 0) {
10713                 ret = btrfs_del_item(trans, tree_root, path);
10714                 if (ret)
10715                         goto out;
10716                 btrfs_release_path(path);
10717         }
10718
10719         spin_lock(&root->fs_info->block_group_cache_lock);
10720         rb_erase(&block_group->cache_node,
10721                  &root->fs_info->block_group_cache_tree);
10722         RB_CLEAR_NODE(&block_group->cache_node);
10723
10724         /* Once for the block groups rbtree */
10725         btrfs_put_block_group(block_group);
10726
10727         if (root->fs_info->first_logical_byte == block_group->key.objectid)
10728                 root->fs_info->first_logical_byte = (u64)-1;
10729         spin_unlock(&root->fs_info->block_group_cache_lock);
10730
10731         down_write(&block_group->space_info->groups_sem);
10732         /*
10733          * we must use list_del_init so people can check to see if they
10734          * are still on the list after taking the semaphore
10735          */
10736         list_del_init(&block_group->list);
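        /*
         * A minimal sketch of why list_del_init() matters here (standard
         * <linux/list.h> semantics, not code from this file): the entry is
         * re-initialized to point at itself, so a racing task that takes
         * groups_sem afterwards can still do a well-defined check:
         *
         *      down_read(&space_info->groups_sem);
         *      if (list_empty(&block_group->list))
         *              ;   // already removed, skip it
         *      up_read(&space_info->groups_sem);
         *
         * A plain list_del() would leave the node poisoned instead.
         */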
10737         if (list_empty(&block_group->space_info->block_groups[index])) {
10738                 kobj = block_group->space_info->block_group_kobjs[index];
10739                 block_group->space_info->block_group_kobjs[index] = NULL;
10740                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10741         }
10742         up_write(&block_group->space_info->groups_sem);
10743         if (kobj) {
10744                 kobject_del(kobj);
10745                 kobject_put(kobj);
10746         }
10747
10748         if (block_group->has_caching_ctl)
10749                 caching_ctl = get_caching_control(block_group);
10750         if (block_group->cached == BTRFS_CACHE_STARTED)
10751                 wait_block_group_cache_done(block_group);
10752         if (block_group->has_caching_ctl) {
10753                 down_write(&root->fs_info->commit_root_sem);
10754                 if (!caching_ctl) {
10755                         struct btrfs_caching_control *ctl;
10756
10757                         list_for_each_entry(ctl,
10758                                     &root->fs_info->caching_block_groups, list)
10759                                 if (ctl->block_group == block_group) {
10760                                         caching_ctl = ctl;
10761                                         atomic_inc(&caching_ctl->count);
10762                                         break;
10763                                 }
10764                 }
10765                 if (caching_ctl)
10766                         list_del_init(&caching_ctl->list);
10767                 up_write(&root->fs_info->commit_root_sem);
10768                 if (caching_ctl) {
10769                         /* Once for the caching bgs list and once for us. */
10770                         put_caching_control(caching_ctl);
10771                         put_caching_control(caching_ctl);
10772                 }
10773         }
10774
10775         spin_lock(&trans->transaction->dirty_bgs_lock);
10776         WARN_ON(!list_empty(&block_group->dirty_list));
10777         WARN_ON(!list_empty(&block_group->io_list));
10782         spin_unlock(&trans->transaction->dirty_bgs_lock);
10783         btrfs_remove_free_space_cache(block_group);
10784
10785         spin_lock(&block_group->space_info->lock);
10786         list_del_init(&block_group->ro_list);
10787
10788         if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
10789                 WARN_ON(block_group->space_info->total_bytes
10790                         < block_group->key.offset);
10791                 WARN_ON(block_group->space_info->bytes_readonly
10792                         < block_group->key.offset);
10793                 WARN_ON(block_group->space_info->disk_total
10794                         < block_group->key.offset * factor);
10795         }
10796         block_group->space_info->total_bytes -= block_group->key.offset;
10797         block_group->space_info->bytes_readonly -= block_group->key.offset;
10798         block_group->space_info->disk_total -= block_group->key.offset * factor;
10799
10800         spin_unlock(&block_group->space_info->lock);
10801
10802         memcpy(&key, &block_group->key, sizeof(key));
10803
10804         lock_chunks(root);
10805         if (!list_empty(&em->list)) {
10806                 /* We're in the transaction->pending_chunks list. */
10807                 free_extent_map(em);
10808         }
10809         spin_lock(&block_group->lock);
10810         block_group->removed = 1;
10811         /*
10812          * At this point trimming can't start on this block group, because we
10813          * removed the block group from the tree fs_info->block_group_cache_tree
10814          * so no one can find it anymore and even if someone already got this
10815          * block group before we removed it from the rbtree, they have already
10816          * incremented block_group->trimming - if they didn't, they won't find
10817          * any free space entries because we already removed them all when we
10818          * called btrfs_remove_free_space_cache().
10819          *
10820          * And we must not remove the extent map from the fs_info->mapping_tree
10821          * to prevent the same logical address range and physical device space
10822          * ranges from being reused for a new block group. This is because our
10823          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10824          * completely transactionless, so while it is trimming a range the
10825          * currently running transaction might finish and a new one start,
10826          * allowing for new block groups to be created that can reuse the same
10827          * physical device locations unless we take this special care.
10828          *
10829          * There may also be an implicit trim operation if the file system
10830          * is mounted with -odiscard. The same protections must remain
10831          * in place until the extents have been discarded completely when
10832          * the transaction commit has completed.
10833          */
10834         remove_em = (atomic_read(&block_group->trimming) == 0);
10835         /*
10836          * Make sure a trimmer task always sees the em in the pinned_chunks list
10837          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10838          * before checking block_group->removed).
10839          */
10840         if (!remove_em) {
10841                 /*
10842                  * Our em might be in trans->transaction->pending_chunks which
10843                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10844                  * and so is the fs_info->pinned_chunks list.
10845                  *
10846                  * So at this point we must be holding the chunk_mutex to avoid
10847                  * any races with chunk allocation (more specifically at
10848                  * volumes.c:contains_pending_extent()), to ensure it always
10849                  * sees the em, either in the pending_chunks list or in the
10850                  * pinned_chunks list.
10851                  */
10852                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10853         }
10854         spin_unlock(&block_group->lock);
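
        /*
         * Sketch of the trimmer-side counterpart described above (shape
         * only, not the actual trimming code): a trimmer increments
         * block_group->trimming under block_group->lock, so it either sees
         * removed == 0 (and we will then see its trimming count and keep
         * the em pinned), or it sees removed == 1 and can rely on the em
         * being reachable:
         *
         *      spin_lock(&block_group->lock);
         *      if (block_group->removed)
         *              ;   // em is on fs_info->pinned_chunks
         *      spin_unlock(&block_group->lock);
         */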
10855
10856         if (remove_em) {
10857                 struct extent_map_tree *em_tree;
10858
10859                 em_tree = &root->fs_info->mapping_tree.map_tree;
10860                 write_lock(&em_tree->lock);
10861                 /*
10862                  * The em might be in the pending_chunks list, so make sure the
10863                  * chunk mutex is locked, since remove_extent_mapping() will
10864                  * delete us from that list.
10865                  */
10866                 remove_extent_mapping(em_tree, em);
10867                 write_unlock(&em_tree->lock);
10868                 /* once for the tree */
10869                 free_extent_map(em);
10870         }
10871
10872         unlock_chunks(root);
10873
10874         ret = remove_block_group_free_space(trans, root->fs_info, block_group);
10875         if (ret)
10876                 goto out;
10877
10878         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10879         if (ret > 0)
10880                 ret = -EIO;
10881         if (ret < 0)
10882                 goto out;
10883
10884         ret = btrfs_del_item(trans, root, path);
10885
10886 out:
10887         /* Once for the lookup reference */
10888         btrfs_put_block_group(block_group);
10889         btrfs_free_path(path);
10890         return ret;
10891 }
10892
10893 struct btrfs_trans_handle *
10894 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10895                                      const u64 chunk_offset)
10896 {
10897         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10898         struct extent_map *em;
10899         struct map_lookup *map;
10900         unsigned int num_items;
10901
10902         read_lock(&em_tree->lock);
10903         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10904         read_unlock(&em_tree->lock);
10905         ASSERT(em && em->start == chunk_offset);
10906
10907         /*
10908          * We need to reserve 3 + N units from the metadata space info in order
10909          * to remove a block group (done at btrfs_remove_chunk() and at
10910          * btrfs_remove_block_group()), which are used for:
10911          *
10912          * 1 unit for adding the free space inode's orphan (located in the tree
10913          * of tree roots).
10914          * 1 unit for deleting the block group item (located in the extent
10915          * tree).
10916          * 1 unit for deleting the free space item (located in tree of tree
10917          * roots).
10918          * N units for deleting N device extent items corresponding to each
10919          * stripe (located in the device tree).
10920          *
10921          * In order to remove a block group we also need to reserve units in the
10922          * system space info in order to update the chunk tree (update one or
10923          * more device items and remove one chunk item), but this is done at
10924          * btrfs_remove_chunk() through a call to check_system_chunk().
10925          */
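        /*
         * Worked example (illustrative): a RAID1 chunk maps to 2 stripes,
         * so num_items = 3 + 2 = 5; a 4-device RAID10 chunk maps to 4
         * stripes, so num_items = 3 + 4 = 7.
         */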
10926         map = em->map_lookup;
10927         num_items = 3 + map->num_stripes;
10928         free_extent_map(em);
10929
10930         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10931                                                            num_items, 1);
10932 }
10933
10934 /*
10935  * Process the unused_bgs list and remove any block groups that don't have
10936  * any allocated space inside of them.
10937  */
10938 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10939 {
10940         struct btrfs_block_group_cache *block_group;
10941         struct btrfs_space_info *space_info;
10942         struct btrfs_root *root = fs_info->extent_root;
10943         struct btrfs_trans_handle *trans;
10944         int ret = 0;
10945
10946         if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
10947                 return;
10948
10949         spin_lock(&fs_info->unused_bgs_lock);
10950         while (!list_empty(&fs_info->unused_bgs)) {
10951                 u64 start, end;
10952                 int trimming;
10953
10954                 block_group = list_first_entry(&fs_info->unused_bgs,
10955                                                struct btrfs_block_group_cache,
10956                                                bg_list);
10957                 list_del_init(&block_group->bg_list);
10958
10959                 space_info = block_group->space_info;
10960
10961                 if (ret || btrfs_mixed_space_info(space_info)) {
10962                         btrfs_put_block_group(block_group);
10963                         continue;
10964                 }
10965                 spin_unlock(&fs_info->unused_bgs_lock);
10966
10967                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10968
10969                 /* Don't want to race with allocators so take the groups_sem */
10970                 down_write(&space_info->groups_sem);
10971                 spin_lock(&block_group->lock);
10972                 if (block_group->reserved || block_group->pinned ||
10973                     btrfs_block_group_used(&block_group->item) ||
10974                     block_group->ro ||
10975                     list_is_singular(&block_group->list)) {
10976                         /*
10977                          * We want to bail if we made new allocations or have
10978                          * outstanding allocations in this block group.  We do
10979                          * the ro check in case balance is currently acting on
10980                          * this block group.
10981                          */
10982                         spin_unlock(&block_group->lock);
10983                         up_write(&space_info->groups_sem);
10984                         goto next;
10985                 }
10986                 spin_unlock(&block_group->lock);
10987
10988                 /* We don't want to force the issue, only flip if it's ok. */
10989                 ret = inc_block_group_ro(block_group, 0);
10990                 up_write(&space_info->groups_sem);
10991                 if (ret < 0) {
10992                         ret = 0;
10993                         goto next;
10994                 }
10995
10996                 /*
10997                  * Want to do this before we do anything else so we can recover
10998                  * properly if we fail to join the transaction.
10999                  */
11000                 trans = btrfs_start_trans_remove_block_group(fs_info,
11001                                                      block_group->key.objectid);
11002                 if (IS_ERR(trans)) {
11003                         btrfs_dec_block_group_ro(root, block_group);
11004                         ret = PTR_ERR(trans);
11005                         goto next;
11006                 }
11007
11008                 /*
11009                  * We could have pending pinned extents for this block group,
11010                  * just delete them, we don't care about them anymore.
11011                  */
11012                 start = block_group->key.objectid;
11013                 end = start + block_group->key.offset - 1;
11014                 /*
11015                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
11016                  * btrfs_finish_extent_commit(). If we are at transaction N,
11017                  * another task might be running finish_extent_commit() for the
11018                  * previous transaction N - 1, and have seen a range belonging
11019                  * to the block group in freed_extents[] before we were able to
11020                  * clear the whole block group range from freed_extents[]. This
11021                  * means that task can look up the block group after we
11022                  * unpinned it from freed_extents[] and removed it, leading to
11023                  * a BUG_ON() at btrfs_unpin_extent_range().
11024                  */
11025                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
11026                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
11027                                   EXTENT_DIRTY);
11028                 if (ret) {
11029                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
11030                         btrfs_dec_block_group_ro(root, block_group);
11031                         goto end_trans;
11032                 }
11033                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
11034                                   EXTENT_DIRTY);
11035                 if (ret) {
11036                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
11037                         btrfs_dec_block_group_ro(root, block_group);
11038                         goto end_trans;
11039                 }
11040                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
11041
11042                 /* Reset pinned so btrfs_put_block_group doesn't complain */
11043                 spin_lock(&space_info->lock);
11044                 spin_lock(&block_group->lock);
11045
11046                 space_info->bytes_pinned -= block_group->pinned;
11047                 space_info->bytes_readonly += block_group->pinned;
11048                 percpu_counter_add(&space_info->total_bytes_pinned,
11049                                    -block_group->pinned);
11050                 block_group->pinned = 0;
11051
11052                 spin_unlock(&block_group->lock);
11053                 spin_unlock(&space_info->lock);
11054
11055                 /* DISCARD can flip during remount */
11056                 trimming = btrfs_test_opt(root->fs_info, DISCARD);
11057
11058                 /* Implicit trim during transaction commit. */
11059                 if (trimming)
11060                         btrfs_get_block_group_trimming(block_group);
11061
11062                 /*
11063                  * btrfs_remove_chunk() will abort the transaction if things go
11064                  * horribly wrong.
11065                  */
11066                 ret = btrfs_remove_chunk(trans, root,
11067                                          block_group->key.objectid);
11068
11069                 if (ret) {
11070                         if (trimming)
11071                                 btrfs_put_block_group_trimming(block_group);
11072                         goto end_trans;
11073                 }
11074
11075                 /*
11076                  * If we're not mounted with -odiscard, we can just forget
11077                  * about this block group. Otherwise we'll need to wait
11078                  * until transaction commit to do the actual discard.
11079                  */
11080                 if (trimming) {
11081                         spin_lock(&fs_info->unused_bgs_lock);
11082                         /*
11083                          * A concurrent scrub might have added us to the list
11084                          * fs_info->unused_bgs, so use a list_move operation
11085                          * to add the block group to the deleted_bgs list.
11086                          */
11087                         list_move(&block_group->bg_list,
11088                                   &trans->transaction->deleted_bgs);
11089                         spin_unlock(&fs_info->unused_bgs_lock);
11090                         btrfs_get_block_group(block_group);
11091                 }
11092 end_trans:
11093                 btrfs_end_transaction(trans, root);
11094 next:
11095                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
11096                 btrfs_put_block_group(block_group);
11097                 spin_lock(&fs_info->unused_bgs_lock);
11098         }
11099         spin_unlock(&fs_info->unused_bgs_lock);
11100 }
11101
11102 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
11103 {
11104         struct btrfs_space_info *space_info;
11105         struct btrfs_super_block *disk_super;
11106         u64 features;
11107         u64 flags;
11108         int mixed = 0;
11109         int ret;
11110
11111         disk_super = fs_info->super_copy;
11112         if (!btrfs_super_root(disk_super))
11113                 return -EINVAL;
11114
11115         features = btrfs_super_incompat_flags(disk_super);
11116         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
11117                 mixed = 1;
11118
11119         flags = BTRFS_BLOCK_GROUP_SYSTEM;
11120         ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
11121         if (ret)
11122                 goto out;
11123
11124         if (mixed) {
11125                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
11126                 ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
11127         } else {
11128                 flags = BTRFS_BLOCK_GROUP_METADATA;
11129                 ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
11130                 if (ret)
11131                         goto out;
11132
11133                 flags = BTRFS_BLOCK_GROUP_DATA;
11134                 ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
11135         }
11136 out:
11137         return ret;
11138 }
11139
11140 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
11141 {
11142         return unpin_extent_range(root, start, end, false);
11143 }
11144
11145 /*
11146  * It used to be that old block groups would be left around forever.
11147  * Iterating over them would be enough to trim unused space.  Since we
11148  * now automatically remove them, we also need to iterate over unallocated
11149  * space.
11150  *
11151  * We don't want a transaction for this since the discard may take a
11152  * substantial amount of time.  We don't require that a transaction be
11153  * running, but we do need to take a running transaction into account
11154  * to ensure that we're not discarding chunks that were released in
11155  * the current transaction.
11156  *
11157  * Holding the chunks lock will prevent other threads from allocating
11158  * or releasing chunks, but it won't prevent a running transaction
11159  * from committing and releasing the memory that the pending chunks
11160  * list head uses.  For that, we need to take a reference to the
11161  * transaction.
11162  */
11163 static int btrfs_trim_free_extents(struct btrfs_device *device,
11164                                    u64 minlen, u64 *trimmed)
11165 {
11166         u64 start = 0, len = 0;
11167         int ret;
11168
11169         *trimmed = 0;
11170
11171         /* Discard not supported = nothing to do. */
11172         if (!blk_queue_discard(bdev_get_queue(device->bdev)))
11173                 return 0;
11174
11175         /* Not writeable = nothing to do. */
11176         if (!device->writeable)
11177                 return 0;
11178
11179         /* No free space = nothing to do. */
11180         if (device->total_bytes <= device->bytes_used)
11181                 return 0;
11182
11183         ret = 0;
11184
11185         while (1) {
11186                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
11187                 struct btrfs_transaction *trans;
11188                 u64 bytes;
11189
11190                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
11191                 if (ret)
11192                         return ret;
11193
11194                 down_read(&fs_info->commit_root_sem);
11195
11196                 spin_lock(&fs_info->trans_lock);
11197                 trans = fs_info->running_transaction;
11198                 if (trans)
11199                         atomic_inc(&trans->use_count);
11200                 spin_unlock(&fs_info->trans_lock);
11201
11202                 ret = find_free_dev_extent_start(trans, device, minlen, start,
11203                                                  &start, &len);
11204                 if (trans)
11205                         btrfs_put_transaction(trans);
11206
11207                 if (ret) {
11208                         up_read(&fs_info->commit_root_sem);
11209                         mutex_unlock(&fs_info->chunk_mutex);
11210                         if (ret == -ENOSPC)
11211                                 ret = 0;
11212                         break;
11213                 }
11214
11215                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
11216                 up_read(&fs_info->commit_root_sem);
11217                 mutex_unlock(&fs_info->chunk_mutex);
11218
11219                 if (ret)
11220                         break;
11221
11222                 start += len;
11223                 *trimmed += bytes;
11224
11225                 if (fatal_signal_pending(current)) {
11226                         ret = -ERESTARTSYS;
11227                         break;
11228                 }
11229
11230                 cond_resched();
11231         }
11232
11233         return ret;
11234 }
11235
11236 /*
11237  * Trim the whole filesystem by:
11238  * 1) trimming the free space in each block group
11239  * 2) trimming the unallocated space on each device
11240  *
11241  * This will also continue trimming even if a block group or device encounters
11242  * an error.  The return value will be the last error, or 0 if nothing bad
11243  * happens.
11244  */
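/*
 * For reference, this path is normally reached through the FITRIM ioctl;
 * an illustrative userspace caller (assumes an fd open on the mounted
 * filesystem) would look like:
 *
 *      struct fstrim_range range = {
 *              .start  = 0,
 *              .len    = ULLONG_MAX,   // trim the whole filesystem
 *              .minlen = 0,
 *      };
 *      ioctl(fd, FITRIM, &range);
 *      // on success, range.len holds the number of bytes actually trimmed
 */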
11245 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
11246 {
11247         struct btrfs_fs_info *fs_info = root->fs_info;
11248         struct btrfs_block_group_cache *cache = NULL;
11249         struct btrfs_device *device;
11250         struct list_head *devices;
11251         u64 group_trimmed;
11252         u64 start;
11253         u64 end;
11254         u64 trimmed = 0;
11255         u64 bg_failed = 0;
11256         u64 dev_failed = 0;
11257         int bg_ret = 0;
11258         int dev_ret = 0;
11259         int ret = 0;
11260
11261         cache = btrfs_lookup_first_block_group(fs_info, range->start);
11262         for (; cache; cache = next_block_group(fs_info->tree_root, cache)) {
11263                 if (cache->key.objectid >= (range->start + range->len)) {
11264                         btrfs_put_block_group(cache);
11265                         break;
11266                 }
11267
11268                 start = max(range->start, cache->key.objectid);
11269                 end = min(range->start + range->len,
11270                                 cache->key.objectid + cache->key.offset);
11271
11272                 if (end - start >= range->minlen) {
11273                         if (!block_group_cache_done(cache)) {
11274                                 ret = cache_block_group(cache, 0);
11275                                 if (ret) {
11276                                         bg_failed++;
11277                                         bg_ret = ret;
11278                                         continue;
11279                                 }
11280                                 ret = wait_block_group_cache_done(cache);
11281                                 if (ret) {
11282                                         bg_failed++;
11283                                         bg_ret = ret;
11284                                         continue;
11285                                 }
11286                         }
11287                         ret = btrfs_trim_block_group(cache,
11288                                                      &group_trimmed,
11289                                                      start,
11290                                                      end,
11291                                                      range->minlen);
11292
11293                         trimmed += group_trimmed;
11294                         if (ret) {
11295                                 bg_failed++;
11296                                 bg_ret = ret;
11297                                 continue;
11298                         }
11299                 }
11300         }
11301
11302         if (bg_failed)
11303                 btrfs_warn(fs_info,
11304                         "failed to trim %llu block group(s), last error %d",
11305                         bg_failed, bg_ret);
11306         mutex_lock(&fs_info->fs_devices->device_list_mutex);
11307         devices = &fs_info->fs_devices->devices;
11308         list_for_each_entry(device, devices, dev_list) {
11309                 ret = btrfs_trim_free_extents(device, range->minlen,
11310                                               &group_trimmed);
11311                 if (ret) {
11312                         dev_failed++;
11313                         dev_ret = ret;
11314                         break;
11315                 }
11316
11317                 trimmed += group_trimmed;
11318         }
11319         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
11320
11321         if (dev_failed)
11322                 btrfs_warn(fs_info,
11323                         "failed to trim %llu device(s), last error %d",
11324                         dev_failed, dev_ret);
11325         range->len = trimmed;
11326         if (bg_ret)
11327                 return bg_ret;
11328         return dev_ret;
11329 }
11330
11331 /*
11332  * btrfs_{start,end}_write_no_snapshoting() are similar to
11333  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
11334  * data into the page cache through nocow before the subvolume is snapshotted
11335  * but flushing the data to disk only after the snapshot creation, and to
11336  * prevent operations while snapshotting is ongoing that would make the
11337  * snapshot inconsistent (writes followed by expanding truncates for example).
11338  */
11339 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
11340 {
11341         percpu_counter_dec(&root->subv_writers->counter);
11342         /*
11343          * Make sure counter is updated before we wake up waiters.
11344          */
11345         smp_mb();
11346         if (waitqueue_active(&root->subv_writers->wait))
11347                 wake_up(&root->subv_writers->wait);
11348 }
11349
11350 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
11351 {
11352         if (atomic_read(&root->will_be_snapshoted))
11353                 return 0;
11354
11355         percpu_counter_inc(&root->subv_writers->counter);
11356         /*
11357          * Make sure counter is updated before we check for snapshot creation.
11358          */
11359         smp_mb();
11360         if (atomic_read(&root->will_be_snapshoted)) {
11361                 btrfs_end_write_no_snapshoting(root);
11362                 return 0;
11363         }
11364         return 1;
11365 }
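
/*
 * Typical usage of the pair above (shape only, illustrative):
 *
 *      if (btrfs_start_write_no_snapshoting(root)) {
 *              // a snapshot cannot start until we call end
 *              ... do nocow writes ...
 *              btrfs_end_write_no_snapshoting(root);
 *      } else {
 *              // a snapshot is pending/ongoing - fall back to the cow path
 *      }
 */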
11366
11367 static int wait_snapshoting_atomic_t(atomic_t *a)
11368 {
11369         schedule();
11370         return 0;
11371 }
11372
11373 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
11374 {
11375         while (true) {
11376                 int ret;
11377
11378                 ret = btrfs_start_write_no_snapshoting(root);
11379                 if (ret)
11380                         break;
11381                 wait_on_atomic_t(&root->will_be_snapshoted,
11382                                  wait_snapshoting_atomic_t,
11383                                  TASK_UNINTERRUPTIBLE);
11384         }
11385 }