GNU Linux-libre 4.19.264-gnu1
fs/btrfs/inode.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/buffer_head.h>
9 #include <linux/file.h>
10 #include <linux/fs.h>
11 #include <linux/pagemap.h>
12 #include <linux/highmem.h>
13 #include <linux/time.h>
14 #include <linux/init.h>
15 #include <linux/string.h>
16 #include <linux/backing-dev.h>
17 #include <linux/writeback.h>
18 #include <linux/compat.h>
19 #include <linux/xattr.h>
20 #include <linux/posix_acl.h>
21 #include <linux/falloc.h>
22 #include <linux/slab.h>
23 #include <linux/ratelimit.h>
24 #include <linux/btrfs.h>
25 #include <linux/blkdev.h>
26 #include <linux/posix_acl_xattr.h>
27 #include <linux/uio.h>
28 #include <linux/magic.h>
29 #include <linux/iversion.h>
30 #include <asm/unaligned.h>
31 #include "ctree.h"
32 #include "disk-io.h"
33 #include "transaction.h"
34 #include "btrfs_inode.h"
35 #include "print-tree.h"
36 #include "ordered-data.h"
37 #include "xattr.h"
38 #include "tree-log.h"
39 #include "volumes.h"
40 #include "compression.h"
41 #include "locking.h"
42 #include "free-space-cache.h"
43 #include "inode-map.h"
44 #include "backref.h"
45 #include "props.h"
46 #include "qgroup.h"
47 #include "dedupe.h"
48
49 struct btrfs_iget_args {
50         struct btrfs_key *location;
51         struct btrfs_root *root;
52 };
53
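/*
 * State carried across a single btrfs direct IO call: the space still
 * reserved for the write, the sub-range of the current ordered extent
 * whose IO has not been submitted yet (so the error path can clean it
 * up), and whether this write overwrites allocated extents within
 * i_size.  This is a summary of how the fields are used by the direct
 * IO helpers later in this file.
 */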
54 struct btrfs_dio_data {
55         u64 reserve;
56         u64 unsubmitted_oe_range_start;
57         u64 unsubmitted_oe_range_end;
58         int overwrite;
59 };
60
61 static const struct inode_operations btrfs_dir_inode_operations;
62 static const struct inode_operations btrfs_symlink_inode_operations;
63 static const struct inode_operations btrfs_dir_ro_inode_operations;
64 static const struct inode_operations btrfs_special_inode_operations;
65 static const struct inode_operations btrfs_file_inode_operations;
66 static const struct address_space_operations btrfs_aops;
67 static const struct address_space_operations btrfs_symlink_aops;
68 static const struct file_operations btrfs_dir_file_operations;
69 static const struct extent_io_ops btrfs_extent_io_ops;
70
71 static struct kmem_cache *btrfs_inode_cachep;
72 struct kmem_cache *btrfs_trans_handle_cachep;
73 struct kmem_cache *btrfs_path_cachep;
74 struct kmem_cache *btrfs_free_space_cachep;
75 struct kmem_cache *btrfs_free_space_bitmap_cachep;
76
77 #define S_SHIFT 12
78 static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
79         [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
80         [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
81         [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
82         [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
83         [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
84         [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
85         [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
86 };
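/*
 * The table above derives the on-disk directory entry type from the
 * inode mode: S_IFMT occupies bits 12-15 of i_mode, so shifting by
 * S_SHIFT yields a small index.  For example, S_IFREG (0100000 octal)
 * >> 12 == 8, and slot 8 holds BTRFS_FT_REG_FILE.
 */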
87
88 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
89 static int btrfs_truncate(struct inode *inode, bool skip_writeback);
90 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
91 static noinline int cow_file_range(struct inode *inode,
92                                    struct page *locked_page,
93                                    u64 start, u64 end, u64 delalloc_end,
94                                    int *page_started, unsigned long *nr_written,
95                                    int unlock, struct btrfs_dedupe_hash *hash);
96 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
97                                        u64 orig_start, u64 block_start,
98                                        u64 block_len, u64 orig_block_len,
99                                        u64 ram_bytes, int compress_type,
100                                        int type);
101
102 static void __endio_write_update_ordered(struct inode *inode,
103                                          const u64 offset, const u64 bytes,
104                                          const bool uptodate);
105
106 /*
107  * Clean up all submitted ordered extents in the specified range to handle
108  * errors from the fill_delalloc() callback.
109  *
110  * NOTE: when an error happens, the caller must not call
111  * extent_clear_unlock_delalloc() to clear both EXTENT_DO_ACCOUNTING and
112  * EXTENT_DELALLOC simultaneously, because that releases the reserved
113  * metadata, which we want to happen only when finishing the ordered
114  * extent (btrfs_finish_ordered_io()).
115  */
116 static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
117                                                  struct page *locked_page,
118                                                  u64 offset, u64 bytes)
119 {
120         unsigned long index = offset >> PAGE_SHIFT;
121         unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
122         u64 page_start = page_offset(locked_page);
123         u64 page_end = page_start + PAGE_SIZE - 1;
124
125         struct page *page;
126
127         while (index <= end_index) {
128                 page = find_get_page(inode->i_mapping, index);
129                 index++;
130                 if (!page)
131                         continue;
132                 ClearPagePrivate2(page);
133                 put_page(page);
134         }
135
136         /*
137          * If the locked page belongs to the delalloc range being
138          * instantiated then skip it, since the first page of a range is
139          * properly cleaned up by the caller of run_delalloc_range.
140          */
141         if (page_start >= offset && page_end <= (offset + bytes - 1)) {
142                 offset += PAGE_SIZE;
143                 bytes -= PAGE_SIZE;
144         }
145
146         return __endio_write_update_ordered(inode, offset, bytes, false);
147 }
148
149 static int btrfs_dirty_inode(struct inode *inode);
150
151 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
152 void btrfs_test_inode_set_ops(struct inode *inode)
153 {
154         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
155 }
156 #endif
157
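/*
 * Set up the security attributes of a newly created inode: inherit POSIX
 * ACLs from the parent directory and let the active LSM attach its
 * security xattr, all inside the transaction that creates the inode.
 */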
158 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
159                                      struct inode *inode,  struct inode *dir,
160                                      const struct qstr *qstr)
161 {
162         int err;
163
164         err = btrfs_init_acl(trans, inode, dir);
165         if (!err)
166                 err = btrfs_xattr_security_init(trans, inode, dir, qstr);
167         return err;
168 }
169
170 /*
171  * this does all the hard work for inserting an inline extent into
172  * the btree.  The caller should have done a btrfs_drop_extents so that
173  * no overlapping inline items exist in the btree
174  */
175 static int insert_inline_extent(struct btrfs_trans_handle *trans,
176                                 struct btrfs_path *path, int extent_inserted,
177                                 struct btrfs_root *root, struct inode *inode,
178                                 u64 start, size_t size, size_t compressed_size,
179                                 int compress_type,
180                                 struct page **compressed_pages)
181 {
182         struct extent_buffer *leaf;
183         struct page *page = NULL;
184         char *kaddr;
185         unsigned long ptr;
186         struct btrfs_file_extent_item *ei;
187         int ret;
188         size_t cur_size = size;
189         unsigned long offset;
190
191         if (compressed_size && compressed_pages)
192                 cur_size = compressed_size;
193
194         inode_add_bytes(inode, size);
195
196         if (!extent_inserted) {
197                 struct btrfs_key key;
198                 size_t datasize;
199
200                 key.objectid = btrfs_ino(BTRFS_I(inode));
201                 key.offset = start;
202                 key.type = BTRFS_EXTENT_DATA_KEY;
203
204                 datasize = btrfs_file_extent_calc_inline_size(cur_size);
205                 path->leave_spinning = 1;
206                 ret = btrfs_insert_empty_item(trans, root, path, &key,
207                                               datasize);
208                 if (ret)
209                         goto fail;
210         }
211         leaf = path->nodes[0];
212         ei = btrfs_item_ptr(leaf, path->slots[0],
213                             struct btrfs_file_extent_item);
214         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
215         btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
216         btrfs_set_file_extent_encryption(leaf, ei, 0);
217         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
218         btrfs_set_file_extent_ram_bytes(leaf, ei, size);
219         ptr = btrfs_file_extent_inline_start(ei);
220
221         if (compress_type != BTRFS_COMPRESS_NONE) {
222                 struct page *cpage;
223                 int i = 0;
224                 while (compressed_size > 0) {
225                         cpage = compressed_pages[i];
226                         cur_size = min_t(unsigned long, compressed_size,
227                                        PAGE_SIZE);
228
229                         kaddr = kmap_atomic(cpage);
230                         write_extent_buffer(leaf, kaddr, ptr, cur_size);
231                         kunmap_atomic(kaddr);
232
233                         i++;
234                         ptr += cur_size;
235                         compressed_size -= cur_size;
236                 }
237                 btrfs_set_file_extent_compression(leaf, ei,
238                                                   compress_type);
239         } else {
240                 page = find_get_page(inode->i_mapping,
241                                      start >> PAGE_SHIFT);
242                 btrfs_set_file_extent_compression(leaf, ei, 0);
243                 kaddr = kmap_atomic(page);
244                 offset = start & (PAGE_SIZE - 1);
245                 write_extent_buffer(leaf, kaddr + offset, ptr, size);
246                 kunmap_atomic(kaddr);
247                 put_page(page);
248         }
249         btrfs_mark_buffer_dirty(leaf);
250         btrfs_release_path(path);
251
252         /*
253          * we're an inline extent, so nobody can
254          * extend the file past i_size without locking
255          * a page we already have locked.
256          *
257          * We must do any isize and inode updates
258          * before we unlock the pages.  Otherwise we
259          * could end up racing with unlink.
260          */
261         BTRFS_I(inode)->disk_i_size = inode->i_size;
262         ret = btrfs_update_inode(trans, root, inode);
263
264 fail:
265         return ret;
266 }
267
268
269 /*
270  * conditionally insert an inline extent into the file.  This
271  * does the checks required to make sure the data is small enough
272  * to fit as an inline extent.
273  */
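/*
 * As a worked example (assuming a 4 KiB sectorsize and the default
 * max_inline of 2048 bytes, tunable with the max_inline mount option):
 * the write must start at offset 0, reach the current i_size, end inside
 * the first sector, and carry at most 2048 bytes of (possibly compressed)
 * data; anything else returns 1 and falls back to regular extents.
 */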
274 static noinline int cow_file_range_inline(struct inode *inode, u64 start,
275                                           u64 end, size_t compressed_size,
276                                           int compress_type,
277                                           struct page **compressed_pages)
278 {
279         struct btrfs_root *root = BTRFS_I(inode)->root;
280         struct btrfs_fs_info *fs_info = root->fs_info;
281         struct btrfs_trans_handle *trans;
282         u64 isize = i_size_read(inode);
283         u64 actual_end = min(end + 1, isize);
284         u64 inline_len = actual_end - start;
285         u64 aligned_end = ALIGN(end, fs_info->sectorsize);
286         u64 data_len = inline_len;
287         int ret;
288         struct btrfs_path *path;
289         int extent_inserted = 0;
290         u32 extent_item_size;
291
292         if (compressed_size)
293                 data_len = compressed_size;
294
295         if (start > 0 ||
296             actual_end > fs_info->sectorsize ||
297             data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
298             (!compressed_size &&
299             (actual_end & (fs_info->sectorsize - 1)) == 0) ||
300             end + 1 < isize ||
301             data_len > fs_info->max_inline) {
302                 return 1;
303         }
304
305         path = btrfs_alloc_path();
306         if (!path)
307                 return -ENOMEM;
308
309         trans = btrfs_join_transaction(root);
310         if (IS_ERR(trans)) {
311                 btrfs_free_path(path);
312                 return PTR_ERR(trans);
313         }
314         trans->block_rsv = &BTRFS_I(inode)->block_rsv;
315
316         if (compressed_size && compressed_pages)
317                 extent_item_size = btrfs_file_extent_calc_inline_size(
318                    compressed_size);
319         else
320                 extent_item_size = btrfs_file_extent_calc_inline_size(
321                     inline_len);
322
323         ret = __btrfs_drop_extents(trans, root, inode, path,
324                                    start, aligned_end, NULL,
325                                    1, 1, extent_item_size, &extent_inserted);
326         if (ret) {
327                 btrfs_abort_transaction(trans, ret);
328                 goto out;
329         }
330
331         if (isize > actual_end)
332                 inline_len = min_t(u64, isize, actual_end);
333         ret = insert_inline_extent(trans, path, extent_inserted,
334                                    root, inode, start,
335                                    inline_len, compressed_size,
336                                    compress_type, compressed_pages);
337         if (ret && ret != -ENOSPC) {
338                 btrfs_abort_transaction(trans, ret);
339                 goto out;
340         } else if (ret == -ENOSPC) {
341                 ret = 1;
342                 goto out;
343         }
344
345         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
346         btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
347 out:
348         /*
349          * Don't forget to free the reserved space: an inlined extent
350          * doesn't count as a data extent, so free the reservation directly
351          * here.  At reserve time it's always page aligned, so just free
352          * one page.
353          */
354         btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
355         btrfs_free_path(path);
356         btrfs_end_transaction(trans);
357         return ret;
358 }
359
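/*
 * One unit of work produced by compress_file_range(): either a set of
 * compressed pages ready for writeback, or (when pages == NULL) a range
 * that fell back to uncompressed IO and is redone with cow_file_range()
 * in submit_compressed_extents().
 */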
360 struct async_extent {
361         u64 start;
362         u64 ram_size;
363         u64 compressed_size;
364         struct page **pages;
365         unsigned long nr_pages;
366         int compress_type;
367         struct list_head list;
368 };
369
370 struct async_cow {
371         struct inode *inode;
372         struct btrfs_root *root;
373         struct page *locked_page;
374         u64 start;
375         u64 end;
376         unsigned int write_flags;
377         struct list_head extents;
378         struct btrfs_work work;
379 };
380
381 static noinline int add_async_extent(struct async_cow *cow,
382                                      u64 start, u64 ram_size,
383                                      u64 compressed_size,
384                                      struct page **pages,
385                                      unsigned long nr_pages,
386                                      int compress_type)
387 {
388         struct async_extent *async_extent;
389
390         async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
391         BUG_ON(!async_extent); /* -ENOMEM */
392         async_extent->start = start;
393         async_extent->ram_size = ram_size;
394         async_extent->compressed_size = compressed_size;
395         async_extent->pages = pages;
396         async_extent->nr_pages = nr_pages;
397         async_extent->compress_type = compress_type;
398         list_add_tail(&async_extent->list, &cow->extents);
399         return 0;
400 }
401
402 /*
403  * Check if the inode has flags compatible with compression
404  */
405 static inline bool inode_can_compress(struct inode *inode)
406 {
407         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
408             BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
409                 return false;
410         return true;
411 }
412
413 /*
414  * Check if the inode needs to be submitted to compression, based on mount
415  * options, defragmentation, properties or heuristics.
416  */
417 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
418 {
419         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
420
421         if (!inode_can_compress(inode)) {
422                 WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
423                         KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
424                         btrfs_ino(BTRFS_I(inode)));
425                 return 0;
426         }
427         /* force compress */
428         if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
429                 return 1;
430         /* defrag ioctl */
431         if (BTRFS_I(inode)->defrag_compress)
432                 return 1;
433         /* bad compression ratios */
434         if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
435                 return 0;
436         if (btrfs_test_opt(fs_info, COMPRESS) ||
437             BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
438             BTRFS_I(inode)->prop_compress)
439                 return btrfs_compress_heuristic(inode, start, end);
440         return 0;
441 }
442
443 static inline void inode_should_defrag(struct btrfs_inode *inode,
444                 u64 start, u64 end, u64 num_bytes, u64 small_write)
445 {
446         /* If this is a small write inside eof, kick off a defrag */
447         if (num_bytes < small_write &&
448             (start > 0 || end + 1 < inode->disk_i_size))
449                 btrfs_add_inode_defrag(NULL, inode);
450 }
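/*
 * For instance, compress_file_range() passes SZ_16K as small_write, so a
 * 4 KiB write landing inside an existing file is considered for auto
 * defrag; btrfs_add_inode_defrag() is a no-op unless the autodefrag
 * mount option is enabled.
 */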
451
452 /*
453  * we create compressed extents in two phases.  The first
454  * phase compresses a range of pages that have already been
455  * locked (both pages and state bits are locked).
456  *
457  * This is done inside an ordered work queue, and the compression
458  * is spread across many cpus.  The actual IO submission is step
459  * two, and the ordered work queue takes care of making sure that
460  * happens in the same order things were put onto the queue by
461  * writepages and friends.
462  *
463  * If this code finds it can't get good compression, it puts an
464  * entry onto the work queue to write the uncompressed bytes.  This
465  * makes sure that both compressed inodes and uncompressed inodes
466  * are written in the same order that the flusher thread sent them
467  * down.
468  */
469 static noinline void compress_file_range(struct inode *inode,
470                                         struct page *locked_page,
471                                         u64 start, u64 end,
472                                         struct async_cow *async_cow,
473                                         int *num_added)
474 {
475         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
476         u64 blocksize = fs_info->sectorsize;
477         u64 actual_end;
478         u64 isize = i_size_read(inode);
479         int ret = 0;
480         struct page **pages = NULL;
481         unsigned long nr_pages;
482         unsigned long total_compressed = 0;
483         unsigned long total_in = 0;
484         int i;
485         int will_compress;
486         int compress_type = fs_info->compress_type;
487         int redirty = 0;
488
489         inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
490                         SZ_16K);
491
492         actual_end = min_t(u64, isize, end + 1);
493 again:
494         will_compress = 0;
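        /*
         * Compress at most BTRFS_MAX_COMPRESSED (128 KiB) worth of pages
         * per pass, i.e. 32 pages with 4 KiB pages; a larger range loops
         * back here via the "again" label after queueing each piece.
         */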
495         nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
496         BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
497         nr_pages = min_t(unsigned long, nr_pages,
498                         BTRFS_MAX_COMPRESSED / PAGE_SIZE);
499
500         /*
501          * we don't want to send crud past the end of i_size through
502          * compression, that's just a waste of CPU time.  So, if the
503          * end of the file is before the start of our current
504          * requested range of bytes, we bail out to the uncompressed
505          * cleanup code that can deal with all of this.
506          *
507          * It isn't really the fastest way to fix things, but this is a
508          * very uncommon corner.
509          */
510         if (actual_end <= start)
511                 goto cleanup_and_bail_uncompressed;
512
513         total_compressed = actual_end - start;
514
515         /*
516          * skip compression for a small file range (<= blocksize) that
517          * isn't an inline extent, since it doesn't save disk space at all.
518          */
519         if (total_compressed <= blocksize &&
520            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
521                 goto cleanup_and_bail_uncompressed;
522
523         total_compressed = min_t(unsigned long, total_compressed,
524                         BTRFS_MAX_UNCOMPRESSED);
525         total_in = 0;
526         ret = 0;
527
528         /*
529          * we do compression for mount -o compress and when the
530          * inode has not been flagged as nocompress.  This flag can
531          * change at any time if we discover bad compression ratios.
532          */
533         if (inode_need_compress(inode, start, end)) {
534                 WARN_ON(pages);
535                 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
536                 if (!pages) {
537                         /* just bail out to the uncompressed code */
538                         nr_pages = 0;
539                         goto cont;
540                 }
541
542                 if (BTRFS_I(inode)->defrag_compress)
543                         compress_type = BTRFS_I(inode)->defrag_compress;
544                 else if (BTRFS_I(inode)->prop_compress)
545                         compress_type = BTRFS_I(inode)->prop_compress;
546
547                 /*
548                  * we need to call clear_page_dirty_for_io on each
549                  * page in the range.  Otherwise applications with the file
550                  * mmap'd can wander in and change the page contents while
551                  * we are compressing them.
552                  *
553                  * If the compression fails for any reason, we set the pages
554                  * dirty again later on.
555                  *
556                  * Note that the remaining part is redirtied, the start pointer
557                  * has moved, the end is the original one.
558                  */
559                 if (!redirty) {
560                         extent_range_clear_dirty_for_io(inode, start, end);
561                         redirty = 1;
562                 }
563
564                 /* Compression level is applied here and only here */
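                /*
                 * btrfs_compress_pages() takes a combined type_level
                 * argument: the compression type sits in the low 4 bits
                 * and the level in the bits above, and the compression
                 * code splits them apart again.
                 */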
565                 ret = btrfs_compress_pages(
566                         compress_type | (fs_info->compress_level << 4),
567                                            inode->i_mapping, start,
568                                            pages,
569                                            &nr_pages,
570                                            &total_in,
571                                            &total_compressed);
572
573                 if (!ret) {
574                         unsigned long offset = total_compressed &
575                                 (PAGE_SIZE - 1);
576                         struct page *page = pages[nr_pages - 1];
577                         char *kaddr;
578
579                         /* zero the tail end of the last page, we might be
580                          * sending it down to disk
581                          */
582                         if (offset) {
583                                 kaddr = kmap_atomic(page);
584                                 memset(kaddr + offset, 0,
585                                        PAGE_SIZE - offset);
586                                 kunmap_atomic(kaddr);
587                         }
588                         will_compress = 1;
589                 }
590         }
591 cont:
592         if (start == 0) {
593                 /* let's try to make an inline extent */
594                 if (ret || total_in < actual_end) {
595                         /* we didn't compress the entire range, try
596                          * to make an uncompressed inline extent.
597                          */
598                         ret = cow_file_range_inline(inode, start, end, 0,
599                                                     BTRFS_COMPRESS_NONE, NULL);
600                 } else {
601                         /* try making a compressed inline extent */
602                         ret = cow_file_range_inline(inode, start, end,
603                                                     total_compressed,
604                                                     compress_type, pages);
605                 }
606                 if (ret <= 0) {
607                         unsigned long clear_flags = EXTENT_DELALLOC |
608                                 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
609                                 EXTENT_DO_ACCOUNTING;
610                         unsigned long page_error_op;
611
612                         page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
613
614                         /*
615                          * inline extent creation worked or returned error,
616                          * we don't need to create any more async work items.
617                          * Unlock and free up our temp pages.
618                          *
619                          * We use DO_ACCOUNTING here because we need the
620                          * delalloc_release_metadata to be done _after_ we drop
621                          * our outstanding extent for clearing delalloc for this
622                          * range.
623                          */
624                         extent_clear_unlock_delalloc(inode, start, end, end,
625                                                      NULL, clear_flags,
626                                                      PAGE_UNLOCK |
627                                                      PAGE_CLEAR_DIRTY |
628                                                      PAGE_SET_WRITEBACK |
629                                                      page_error_op |
630                                                      PAGE_END_WRITEBACK);
631
632                         /*
633                          * Ensure we only free the compressed pages if we have
634                          * them allocated, as we can still reach here with
635                          * inode_need_compress() == false.
636                          */
637                         if (pages) {
638                                 for (i = 0; i < nr_pages; i++) {
639                                         WARN_ON(pages[i]->mapping);
640                                         put_page(pages[i]);
641                                 }
642                                 kfree(pages);
643                         }
644
645                         return;
646                 }
647         }
648
649         if (will_compress) {
650                 /*
651                  * we aren't doing an inline extent, so round the compressed
652                  * size up to a block size boundary so the allocator does
653                  * sane things
654                  */
655                 total_compressed = ALIGN(total_compressed, blocksize);
656
657                 /*
658                  * one last check to make sure the compression is really a
659                  * win: compare the aligned bytes read with the compressed
660                  * bytes on disk; compression must free at least one sector
661                  */
662                 total_in = ALIGN(total_in, PAGE_SIZE);
663                 if (total_compressed + blocksize <= total_in) {
664                         *num_added += 1;
665
666                         /*
667                          * The async work queues will take care of doing actual
668                          * allocation on disk for these compressed pages, and
669                          * will submit them to the elevator.
670                          */
671                         add_async_extent(async_cow, start, total_in,
672                                         total_compressed, pages, nr_pages,
673                                         compress_type);
674
675                         if (start + total_in < end) {
676                                 start += total_in;
677                                 pages = NULL;
678                                 cond_resched();
679                                 goto again;
680                         }
681                         return;
682                 }
683         }
684         if (pages) {
685                 /*
686                  * the compression code ran but failed to make things smaller,
687                  * free any pages it allocated and our page pointer array
688                  */
689                 for (i = 0; i < nr_pages; i++) {
690                         WARN_ON(pages[i]->mapping);
691                         put_page(pages[i]);
692                 }
693                 kfree(pages);
694                 pages = NULL;
695                 total_compressed = 0;
696                 nr_pages = 0;
697
698                 /* flag the file so we don't compress in the future */
699                 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
700                     !(BTRFS_I(inode)->prop_compress)) {
701                         BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
702                 }
703         }
704 cleanup_and_bail_uncompressed:
705         /*
706          * No compression, but we still need to write the pages in the file
707          * we've been given so far.  Redirty the locked page if it corresponds
708          * to our extent and set things up for the async work queue to run
709          * cow_file_range to do the normal delalloc dance.
710          */
711         if (page_offset(locked_page) >= start &&
712             page_offset(locked_page) <= end)
713                 __set_page_dirty_nobuffers(locked_page);
714                 /* unlocked later on in the async handlers */
715
716         if (redirty)
717                 extent_range_redirty_for_io(inode, start, end);
718         add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
719                          BTRFS_COMPRESS_NONE);
720         *num_added += 1;
721
722         return;
723 }
724
725 static void free_async_extent_pages(struct async_extent *async_extent)
726 {
727         int i;
728
729         if (!async_extent->pages)
730                 return;
731
732         for (i = 0; i < async_extent->nr_pages; i++) {
733                 WARN_ON(async_extent->pages[i]->mapping);
734                 put_page(async_extent->pages[i]);
735         }
736         kfree(async_extent->pages);
737         async_extent->nr_pages = 0;
738         async_extent->pages = NULL;
739 }
740
741 /*
742  * phase two of compressed writeback.  This is the ordered portion
743  * of the code, which only gets called in the order the work was
744  * queued.  We walk all the async extents created by compress_file_range
745  * and send them down to the disk.
746  */
747 static noinline void submit_compressed_extents(struct inode *inode,
748                                               struct async_cow *async_cow)
749 {
750         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
751         struct async_extent *async_extent;
752         u64 alloc_hint = 0;
753         struct btrfs_key ins;
754         struct extent_map *em;
755         struct btrfs_root *root = BTRFS_I(inode)->root;
756         struct extent_io_tree *io_tree;
757         int ret = 0;
758
759 again:
760         while (!list_empty(&async_cow->extents)) {
761                 async_extent = list_entry(async_cow->extents.next,
762                                           struct async_extent, list);
763                 list_del(&async_extent->list);
764
765                 io_tree = &BTRFS_I(inode)->io_tree;
766
767 retry:
768                 /* did the compression code fall back to uncompressed IO? */
769                 if (!async_extent->pages) {
770                         int page_started = 0;
771                         unsigned long nr_written = 0;
772
773                         lock_extent(io_tree, async_extent->start,
774                                          async_extent->start +
775                                          async_extent->ram_size - 1);
776
777                         /* allocate blocks */
778                         ret = cow_file_range(inode, async_cow->locked_page,
779                                              async_extent->start,
780                                              async_extent->start +
781                                              async_extent->ram_size - 1,
782                                              async_extent->start +
783                                              async_extent->ram_size - 1,
784                                              &page_started, &nr_written, 0,
785                                              NULL);
786
787                         /* JDM XXX */
788
789                         /*
790                          * if page_started, cow_file_range inserted an
791                          * inline extent and took care of all the unlocking
792                          * and IO for us.  Otherwise, we need to submit
793                          * all those pages down to the drive.
794                          */
795                         if (!page_started && !ret)
796                                 extent_write_locked_range(inode,
797                                                   async_extent->start,
798                                                   async_extent->start +
799                                                   async_extent->ram_size - 1,
800                                                   WB_SYNC_ALL);
801                         else if (ret)
802                                 unlock_page(async_cow->locked_page);
803                         kfree(async_extent);
804                         cond_resched();
805                         continue;
806                 }
807
808                 lock_extent(io_tree, async_extent->start,
809                             async_extent->start + async_extent->ram_size - 1);
810
811                 ret = btrfs_reserve_extent(root, async_extent->ram_size,
812                                            async_extent->compressed_size,
813                                            async_extent->compressed_size,
814                                            0, alloc_hint, &ins, 1, 1);
815                 if (ret) {
816                         free_async_extent_pages(async_extent);
817
818                         if (ret == -ENOSPC) {
819                                 unlock_extent(io_tree, async_extent->start,
820                                               async_extent->start +
821                                               async_extent->ram_size - 1);
822
823                                 /*
824                                  * we need to redirty the pages if we decide to
825                                  * fall back to uncompressed IO, otherwise we
826                                  * will not submit these pages down to lower
827                                  * layers.
828                                  */
829                                 extent_range_redirty_for_io(inode,
830                                                 async_extent->start,
831                                                 async_extent->start +
832                                                 async_extent->ram_size - 1);
833
834                                 goto retry;
835                         }
836                         goto out_free;
837                 }
838                 /*
839                  * here we're doing allocation and writeback of the
840                  * compressed pages
841                  */
842                 em = create_io_em(inode, async_extent->start,
843                                   async_extent->ram_size, /* len */
844                                   async_extent->start, /* orig_start */
845                                   ins.objectid, /* block_start */
846                                   ins.offset, /* block_len */
847                                   ins.offset, /* orig_block_len */
848                                   async_extent->ram_size, /* ram_bytes */
849                                   async_extent->compress_type,
850                                   BTRFS_ORDERED_COMPRESSED);
851                 if (IS_ERR(em))
852                         /* ret value is not needed since this is a void function */
853                         goto out_free_reserve;
854                 free_extent_map(em);
855
856                 ret = btrfs_add_ordered_extent_compress(inode,
857                                                 async_extent->start,
858                                                 ins.objectid,
859                                                 async_extent->ram_size,
860                                                 ins.offset,
861                                                 BTRFS_ORDERED_COMPRESSED,
862                                                 async_extent->compress_type);
863                 if (ret) {
864                         btrfs_drop_extent_cache(BTRFS_I(inode),
865                                                 async_extent->start,
866                                                 async_extent->start +
867                                                 async_extent->ram_size - 1, 0);
868                         goto out_free_reserve;
869                 }
870                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
871
872                 /*
873                  * clear dirty, set writeback and unlock the pages.
874                  */
875                 extent_clear_unlock_delalloc(inode, async_extent->start,
876                                 async_extent->start +
877                                 async_extent->ram_size - 1,
878                                 async_extent->start +
879                                 async_extent->ram_size - 1,
880                                 NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
881                                 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
882                                 PAGE_SET_WRITEBACK);
883                 if (btrfs_submit_compressed_write(inode,
884                                     async_extent->start,
885                                     async_extent->ram_size,
886                                     ins.objectid,
887                                     ins.offset, async_extent->pages,
888                                     async_extent->nr_pages,
889                                     async_cow->write_flags)) {
890                         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
891                         struct page *p = async_extent->pages[0];
892                         const u64 start = async_extent->start;
893                         const u64 end = start + async_extent->ram_size - 1;
894
895                         p->mapping = inode->i_mapping;
896                         tree->ops->writepage_end_io_hook(p, start, end,
897                                                          NULL, 0);
898                         p->mapping = NULL;
899                         extent_clear_unlock_delalloc(inode, start, end, end,
900                                                      NULL, 0,
901                                                      PAGE_END_WRITEBACK |
902                                                      PAGE_SET_ERROR);
903                         free_async_extent_pages(async_extent);
904                 }
905                 alloc_hint = ins.objectid + ins.offset;
906                 kfree(async_extent);
907                 cond_resched();
908         }
909         return;
910 out_free_reserve:
911         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
912         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
913 out_free:
914         extent_clear_unlock_delalloc(inode, async_extent->start,
915                                      async_extent->start +
916                                      async_extent->ram_size - 1,
917                                      async_extent->start +
918                                      async_extent->ram_size - 1,
919                                      NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
920                                      EXTENT_DELALLOC_NEW |
921                                      EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
922                                      PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
923                                      PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
924                                      PAGE_SET_ERROR);
925         free_async_extent_pages(async_extent);
926         kfree(async_extent);
927         goto again;
928 }
929
930 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
931                                       u64 num_bytes)
932 {
933         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
934         struct extent_map *em;
935         u64 alloc_hint = 0;
936
937         read_lock(&em_tree->lock);
938         em = search_extent_mapping(em_tree, start, num_bytes);
939         if (em) {
940                 /*
941                  * if block start isn't an actual block number then find the
942                  * first block in this inode and use that as a hint.  If that
943                  * block is also bogus then just don't worry about it.
944                  */
945                 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
946                         free_extent_map(em);
947                         em = search_extent_mapping(em_tree, 0, 0);
948                         if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
949                                 alloc_hint = em->block_start;
950                         if (em)
951                                 free_extent_map(em);
952                 } else {
953                         alloc_hint = em->block_start;
954                         free_extent_map(em);
955                 }
956         }
957         read_unlock(&em_tree->lock);
958
959         return alloc_hint;
960 }
961
962 /*
963  * when extent_io.c finds a delayed allocation range in the file,
964  * the callbacks end up in this code.  The basic idea is to
965  * allocate extents on disk for the range, and create ordered data structs
966  * in ram to track those extents.
967  *
968  * locked_page is the page that writepage had locked already.  We use
969  * it to make sure we don't do extra locks or unlocks.
970  *
971  * *page_started is set to one if we unlock locked_page and do everything
972  * required to start IO on it.  It may be clean and already done with
973  * IO when we return.
974  */
975 static noinline int cow_file_range(struct inode *inode,
976                                    struct page *locked_page,
977                                    u64 start, u64 end, u64 delalloc_end,
978                                    int *page_started, unsigned long *nr_written,
979                                    int unlock, struct btrfs_dedupe_hash *hash)
980 {
981         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
982         struct btrfs_root *root = BTRFS_I(inode)->root;
983         u64 alloc_hint = 0;
984         u64 num_bytes;
985         unsigned long ram_size;
986         u64 cur_alloc_size = 0;
987         u64 min_alloc_size;
988         u64 blocksize = fs_info->sectorsize;
989         struct btrfs_key ins;
990         struct extent_map *em;
991         unsigned clear_bits;
992         unsigned long page_ops;
993         bool extent_reserved = false;
994         int ret = 0;
995
996         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
997                 WARN_ON_ONCE(1);
998                 ret = -EINVAL;
999                 goto out_unlock;
1000         }
1001
1002         num_bytes = ALIGN(end - start + 1, blocksize);
1003         num_bytes = max(blocksize,  num_bytes);
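        /*
         * e.g. with a 4 KiB sectorsize, a single dirty byte at offset 0
         * is still rounded up to one full 4 KiB allocation.
         */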
1004         ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
1005
1006         inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);
1007
1008         if (start == 0) {
1009                 /* let's try to make an inline extent */
1010                 ret = cow_file_range_inline(inode, start, end, 0,
1011                                             BTRFS_COMPRESS_NONE, NULL);
1012                 if (ret == 0) {
1013                         /*
1014                          * We use DO_ACCOUNTING here because we need the
1015                          * delalloc_release_metadata to be run _after_ we drop
1016                          * our outstanding extent for clearing delalloc for this
1017                          * range.
1018                          */
1019                         extent_clear_unlock_delalloc(inode, start, end,
1020                                      delalloc_end, NULL,
1021                                      EXTENT_LOCKED | EXTENT_DELALLOC |
1022                                      EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1023                                      EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1024                                      PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1025                                      PAGE_END_WRITEBACK);
1026                         *nr_written = *nr_written +
1027                              (end - start + PAGE_SIZE) / PAGE_SIZE;
1028                         *page_started = 1;
1029                         goto out;
1030                 } else if (ret < 0) {
1031                         goto out_unlock;
1032                 }
1033         }
1034
1035         alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
1036         btrfs_drop_extent_cache(BTRFS_I(inode), start,
1037                         start + num_bytes - 1, 0);
1038
1039         /*
1040          * Relocation relies on the relocated extents to have exactly the same
1041          * size as the original extents. Normally writeback for relocation data
1042          * extents follows a NOCOW path because relocation preallocates the
1043          * extents. However, due to an operation such as scrub turning a block
1044          * group to RO mode, it may fallback to COW mode, so we must make sure
1045          * an extent allocated during COW has exactly the requested size and can
1046          * not be split into smaller extents, otherwise relocation breaks and
1047          * fails during the stage where it updates the bytenr of file extent
1048          * items.
1049          */
1050         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1051                 min_alloc_size = num_bytes;
1052         else
1053                 min_alloc_size = fs_info->sectorsize;
1054
1055         while (num_bytes > 0) {
1056                 cur_alloc_size = num_bytes;
1057                 ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1058                                            min_alloc_size, 0, alloc_hint,
1059                                            &ins, 1, 1);
1060                 if (ret < 0)
1061                         goto out_unlock;
1062                 cur_alloc_size = ins.offset;
1063                 extent_reserved = true;
1064
1065                 ram_size = ins.offset;
1066                 em = create_io_em(inode, start, ins.offset, /* len */
1067                                   start, /* orig_start */
1068                                   ins.objectid, /* block_start */
1069                                   ins.offset, /* block_len */
1070                                   ins.offset, /* orig_block_len */
1071                                   ram_size, /* ram_bytes */
1072                                   BTRFS_COMPRESS_NONE, /* compress_type */
1073                                   BTRFS_ORDERED_REGULAR /* type */);
1074                 if (IS_ERR(em)) {
1075                         ret = PTR_ERR(em);
1076                         goto out_reserve;
1077                 }
1078                 free_extent_map(em);
1079
1080                 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1081                                                ram_size, cur_alloc_size, 0);
1082                 if (ret)
1083                         goto out_drop_extent_cache;
1084
1085                 if (root->root_key.objectid ==
1086                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1087                         ret = btrfs_reloc_clone_csums(inode, start,
1088                                                       cur_alloc_size);
1089                         /*
1090                          * Only drop cache here, and process as normal.
1091                          *
1092                          * We must not allow extent_clear_unlock_delalloc()
1093                          * at the out_unlock label to free the metadata of
1094                          * this ordered extent, as it should be freed by
1095                          * btrfs_finish_ordered_io().
1096                          *
1097                          * So we must continue until @start is increased to
1098                          * skip current ordered extent.
1099                          */
1100                         if (ret)
1101                                 btrfs_drop_extent_cache(BTRFS_I(inode), start,
1102                                                 start + ram_size - 1, 0);
1103                 }
1104
1105                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1106
1107                 /* we're not doing compressed IO, don't unlock the first
1108                  * page (which the caller expects to stay locked), don't
1109                  * clear any dirty bits and don't set any writeback bits
1110                  *
1111                  * Do set the Private2 bit so we know this page was properly
1112                  * setup for writepage
1113                  */
1114                 page_ops = unlock ? PAGE_UNLOCK : 0;
1115                 page_ops |= PAGE_SET_PRIVATE2;
1116
1117                 extent_clear_unlock_delalloc(inode, start,
1118                                              start + ram_size - 1,
1119                                              delalloc_end, locked_page,
1120                                              EXTENT_LOCKED | EXTENT_DELALLOC,
1121                                              page_ops);
1122                 if (num_bytes < cur_alloc_size)
1123                         num_bytes = 0;
1124                 else
1125                         num_bytes -= cur_alloc_size;
1126                 alloc_hint = ins.objectid + ins.offset;
1127                 start += cur_alloc_size;
1128                 extent_reserved = false;
1129
1130                 /*
1131                  * On btrfs_reloc_clone_csums() error: start was increased, so
1132                  * extent_clear_unlock_delalloc() at out_unlock won't free the
1133                  * metadata of the current ordered extent, and we're OK to exit.
1134                  */
1135                 if (ret)
1136                         goto out_unlock;
1137         }
1138 out:
1139         return ret;
1140
1141 out_drop_extent_cache:
1142         btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
1143 out_reserve:
1144         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1145         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1146 out_unlock:
1147         clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1148                 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1149         page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1150                 PAGE_END_WRITEBACK;
1151         /*
1152          * If we reserved an extent for our delalloc range (or a subrange) and
1153          * failed to create the respective ordered extent, then it means that
1154          * when we reserved the extent we decremented the extent's size from
1155          * the data space_info's bytes_may_use counter and incremented the
1156          * space_info's bytes_reserved counter by the same amount. We must make
1157          * sure extent_clear_unlock_delalloc() does not try to decrement again
1158          * the data space_info's bytes_may_use counter, therefore we do not pass
1159          * it the flag EXTENT_CLEAR_DATA_RESV.
1160          */
1161         if (extent_reserved) {
1162                 extent_clear_unlock_delalloc(inode, start,
1163                                              start + cur_alloc_size - 1,
1164                                              start + cur_alloc_size - 1,
1165                                              locked_page,
1166                                              clear_bits,
1167                                              page_ops);
1168                 start += cur_alloc_size;
1169                 if (start >= end)
1170                         goto out;
1171         }
1172         extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
1173                                      locked_page,
1174                                      clear_bits | EXTENT_CLEAR_DATA_RESV,
1175                                      page_ops);
1176         goto out;
1177 }
1178
1179 /*
1180  * work queue callback to start compression on a file's pages
1181  */
1182 static noinline void async_cow_start(struct btrfs_work *work)
1183 {
1184         struct async_cow *async_cow;
1185         int num_added = 0;
1186         async_cow = container_of(work, struct async_cow, work);
1187
1188         compress_file_range(async_cow->inode, async_cow->locked_page,
1189                             async_cow->start, async_cow->end, async_cow,
1190                             &num_added);
1191         if (num_added == 0) {
1192                 btrfs_add_delayed_iput(async_cow->inode);
1193                 async_cow->inode = NULL;
1194         }
1195 }
1196
1197 /*
1198  * work queue callback to submit previously compressed pages
1199  */
1200 static noinline void async_cow_submit(struct btrfs_work *work)
1201 {
1202         struct btrfs_fs_info *fs_info;
1203         struct async_cow *async_cow;
1204         struct btrfs_root *root;
1205         unsigned long nr_pages;
1206
1207         async_cow = container_of(work, struct async_cow, work);
1208
1209         root = async_cow->root;
1210         fs_info = root->fs_info;
1211         nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
1212                 PAGE_SHIFT;
1213
1214         /* atomic_sub_return implies a barrier */
1215         if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1216             5 * SZ_1M)
1217                 cond_wake_up_nomb(&fs_info->async_submit_wait);
1218
1219         if (async_cow->inode)
1220                 submit_compressed_extents(async_cow->inode, async_cow);
1221 }
1222
1223 static noinline void async_cow_free(struct btrfs_work *work)
1224 {
1225         struct async_cow *async_cow;
1226         async_cow = container_of(work, struct async_cow, work);
1227         if (async_cow->inode)
1228                 btrfs_add_delayed_iput(async_cow->inode);
1229         kfree(async_cow);
1230 }
1231
1232 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1233                                 u64 start, u64 end, int *page_started,
1234                                 unsigned long *nr_written,
1235                                 unsigned int write_flags)
1236 {
1237         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1238         struct async_cow *async_cow;
1239         struct btrfs_root *root = BTRFS_I(inode)->root;
1240         unsigned long nr_pages;
1241         u64 cur_end;
1242
1243         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1244                          1, 0, NULL);
1245         while (start < end) {
1246                 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1247                 BUG_ON(!async_cow); /* -ENOMEM */
1248                 async_cow->inode = igrab(inode);
1249                 async_cow->root = root;
1250                 async_cow->locked_page = locked_page;
1251                 async_cow->start = start;
1252                 async_cow->write_flags = write_flags;
1253
1254                 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
1255                     !btrfs_test_opt(fs_info, FORCE_COMPRESS))
1256                         cur_end = end;
1257                 else
1258                         cur_end = min(end, start + SZ_512K - 1);
1259
1260                 async_cow->end = cur_end;
1261                 INIT_LIST_HEAD(&async_cow->extents);
1262
1263                 btrfs_init_work(&async_cow->work,
1264                                 btrfs_delalloc_helper,
1265                                 async_cow_start, async_cow_submit,
1266                                 async_cow_free);
1267
1268                 nr_pages = (cur_end - start + PAGE_SIZE) >>
1269                         PAGE_SHIFT;
1270                 atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1271
1272                 btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
1273
1274                 *nr_written += nr_pages;
1275                 start = cur_end + 1;
1276         }
1277         *page_started = 1;
1278         return 0;
1279 }
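/*
 * For illustration, assuming 4K pages: a compressible 1280K delalloc range
 * [0, 1280K - 1] is split by the loop above into three work items,
 *
 *	[0,      512K - 1]   nr_pages = 128
 *	[512K,  1024K - 1]   nr_pages = 128
 *	[1024K, 1280K - 1]   nr_pages = 64
 *
 * so *nr_written advances by 320 pages and three async_cow jobs land on the
 * delalloc workqueue.  An inode flagged NOCOMPRESS (without the
 * force-compress mount option) instead gets a single work item spanning the
 * whole range.
 */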
1280
1281 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1282                                         u64 bytenr, u64 num_bytes)
1283 {
1284         int ret;
1285         struct btrfs_ordered_sum *sums;
1286         LIST_HEAD(list);
1287
1288         ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
1289                                        bytenr + num_bytes - 1, &list, 0);
1290         if (ret == 0 && list_empty(&list))
1291                 return 0;
1292
1293         while (!list_empty(&list)) {
1294                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1295                 list_del(&sums->list);
1296                 kfree(sums);
1297         }
1298         if (ret < 0)
1299                 return ret;
1300         return 1;
1301 }
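/*
 * Return contract of csum_exist_in_range() above: 0 means no csums exist in
 * the range, 1 means at least one csum was found, and a negative errno means
 * the lookup itself failed.  The list is drained and freed on every path.
 * A sketch of how run_delalloc_nocow() below consumes it:
 *
 *	ret = csum_exist_in_range(fs_info, disk_bytenr, num_bytes);
 *	if (ret < 0)
 *		goto error;	(lookup failed, e.g. -EIO reading metadata)
 *	if (ret)
 *		goto out_check;	(csums exist: fall back to COW)
 *	(ret == 0: safe to overwrite the blocks in place)
 */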
1302
1303 /*
1304  * Callback for the nocow writeback path.  This checks for snapshots or
1305  * COW copies of the extents that exist in the file, and COWs as required.
1306  *
1307  * If no COW copies or snapshots exist, we write directly to the existing
1308  * blocks on disk.
1309  */
1310 static noinline int run_delalloc_nocow(struct inode *inode,
1311                                        struct page *locked_page,
1312                               u64 start, u64 end, int *page_started, int force,
1313                               unsigned long *nr_written)
1314 {
1315         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1316         struct btrfs_root *root = BTRFS_I(inode)->root;
1317         struct extent_buffer *leaf;
1318         struct btrfs_path *path;
1319         struct btrfs_file_extent_item *fi;
1320         struct btrfs_key found_key;
1321         struct extent_map *em;
1322         u64 cow_start;
1323         u64 cur_offset;
1324         u64 extent_end;
1325         u64 extent_offset;
1326         u64 disk_bytenr;
1327         u64 num_bytes;
1328         u64 disk_num_bytes;
1329         u64 ram_bytes;
1330         int extent_type;
1331         int ret;
1332         int type;
1333         int nocow;
1334         int check_prev = 1;
1335         bool nolock;
1336         u64 ino = btrfs_ino(BTRFS_I(inode));
1337
1338         path = btrfs_alloc_path();
1339         if (!path) {
1340                 extent_clear_unlock_delalloc(inode, start, end, end,
1341                                              locked_page,
1342                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1343                                              EXTENT_DO_ACCOUNTING |
1344                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1345                                              PAGE_CLEAR_DIRTY |
1346                                              PAGE_SET_WRITEBACK |
1347                                              PAGE_END_WRITEBACK);
1348                 return -ENOMEM;
1349         }
1350
1351         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
1352
1353         cow_start = (u64)-1;
1354         cur_offset = start;
1355         while (1) {
1356                 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1357                                                cur_offset, 0);
1358                 if (ret < 0)
1359                         goto error;
1360                 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1361                         leaf = path->nodes[0];
1362                         btrfs_item_key_to_cpu(leaf, &found_key,
1363                                               path->slots[0] - 1);
1364                         if (found_key.objectid == ino &&
1365                             found_key.type == BTRFS_EXTENT_DATA_KEY)
1366                                 path->slots[0]--;
1367                 }
1368                 check_prev = 0;
1369 next_slot:
1370                 leaf = path->nodes[0];
1371                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1372                         ret = btrfs_next_leaf(root, path);
1373                         if (ret < 0) {
1374                                 if (cow_start != (u64)-1)
1375                                         cur_offset = cow_start;
1376                                 goto error;
1377                         }
1378                         if (ret > 0)
1379                                 break;
1380                         leaf = path->nodes[0];
1381                 }
1382
1383                 nocow = 0;
1384                 disk_bytenr = 0;
1385                 num_bytes = 0;
1386                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1387
1388                 if (found_key.objectid > ino)
1389                         break;
1390                 if (WARN_ON_ONCE(found_key.objectid < ino) ||
1391                     found_key.type < BTRFS_EXTENT_DATA_KEY) {
1392                         path->slots[0]++;
1393                         goto next_slot;
1394                 }
1395                 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1396                     found_key.offset > end)
1397                         break;
1398
1399                 if (found_key.offset > cur_offset) {
1400                         extent_end = found_key.offset;
1401                         extent_type = 0;
1402                         goto out_check;
1403                 }
1404
1405                 fi = btrfs_item_ptr(leaf, path->slots[0],
1406                                     struct btrfs_file_extent_item);
1407                 extent_type = btrfs_file_extent_type(leaf, fi);
1408
1409                 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1410                 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1411                     extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1412                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1413                         extent_offset = btrfs_file_extent_offset(leaf, fi);
1414                         extent_end = found_key.offset +
1415                                 btrfs_file_extent_num_bytes(leaf, fi);
1416                         disk_num_bytes =
1417                                 btrfs_file_extent_disk_num_bytes(leaf, fi);
1418                         if (extent_end <= start) {
1419                                 path->slots[0]++;
1420                                 goto next_slot;
1421                         }
1422                         if (disk_bytenr == 0)
1423                                 goto out_check;
1424                         if (btrfs_file_extent_compression(leaf, fi) ||
1425                             btrfs_file_extent_encryption(leaf, fi) ||
1426                             btrfs_file_extent_other_encoding(leaf, fi))
1427                                 goto out_check;
1428                         /*
1429                          * Do the same check as in btrfs_cross_ref_exist but
1430                          * without the unnecessary search.
1431                          */
1432                         if (!nolock &&
1433                             btrfs_file_extent_generation(leaf, fi) <=
1434                             btrfs_root_last_snapshot(&root->root_item))
1435                                 goto out_check;
1436                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1437                                 goto out_check;
1438                         if (btrfs_extent_readonly(fs_info, disk_bytenr))
1439                                 goto out_check;
1440                         ret = btrfs_cross_ref_exist(root, ino,
1441                                                     found_key.offset -
1442                                                     extent_offset, disk_bytenr);
1443                         if (ret) {
1444                                 /*
1445                                  * ret could be -EIO if the above fails to read
1446                                  * metadata.
1447                                  */
1448                                 if (ret < 0) {
1449                                         if (cow_start != (u64)-1)
1450                                                 cur_offset = cow_start;
1451                                         goto error;
1452                                 }
1453
1454                                 WARN_ON_ONCE(nolock);
1455                                 goto out_check;
1456                         }
1457                         disk_bytenr += extent_offset;
1458                         disk_bytenr += cur_offset - found_key.offset;
1459                         num_bytes = min(end + 1, extent_end) - cur_offset;
1460                         /*
1461                          * If there are pending snapshots for this root,
1462                          * we fall back to the common COW path.
1463                          */
1464                         if (!nolock && atomic_read(&root->snapshot_force_cow))
1465                                 goto out_check;
1466                         /*
1467                          * Force COW if csums exist in the range.
1468                          * This ensures that csums for a given extent are
1469                          * either all valid or do not exist.
1470                          */
1471                         ret = csum_exist_in_range(fs_info, disk_bytenr,
1472                                                   num_bytes);
1473                         if (ret) {
1474                                 /*
1475                                  * ret could be -EIO if the above fails to read
1476                                  * metadata.
1477                                  */
1478                                 if (ret < 0) {
1479                                         if (cow_start != (u64)-1)
1480                                                 cur_offset = cow_start;
1481                                         goto error;
1482                                 }
1483                                 WARN_ON_ONCE(nolock);
1484                                 goto out_check;
1485                         }
1486                         if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
1487                                 goto out_check;
1488                         nocow = 1;
1489                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1490                         extent_end = found_key.offset +
1491                                 btrfs_file_extent_ram_bytes(leaf, fi);
1492                         extent_end = ALIGN(extent_end,
1493                                            fs_info->sectorsize);
1494                 } else {
1495                         BUG_ON(1);
1496                 }
1497 out_check:
1498                 if (extent_end <= start) {
1499                         path->slots[0]++;
1500                         if (nocow)
1501                                 btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1502                         goto next_slot;
1503                 }
1504                 if (!nocow) {
1505                         if (cow_start == (u64)-1)
1506                                 cow_start = cur_offset;
1507                         cur_offset = extent_end;
1508                         if (cur_offset > end)
1509                                 break;
1510                         path->slots[0]++;
1511                         goto next_slot;
1512                 }
1513
1514                 btrfs_release_path(path);
1515                 if (cow_start != (u64)-1) {
1516                         ret = cow_file_range(inode, locked_page,
1517                                              cow_start, found_key.offset - 1,
1518                                              end, page_started, nr_written, 1,
1519                                              NULL);
1520                         if (ret) {
1521                                 if (nocow)
1522                                         btrfs_dec_nocow_writers(fs_info,
1523                                                                 disk_bytenr);
1524                                 goto error;
1525                         }
1526                         cow_start = (u64)-1;
1527                 }
1528
1529                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1530                         u64 orig_start = found_key.offset - extent_offset;
1531
1532                         em = create_io_em(inode, cur_offset, num_bytes,
1533                                           orig_start,
1534                                           disk_bytenr, /* block_start */
1535                                           num_bytes, /* block_len */
1536                                           disk_num_bytes, /* orig_block_len */
1537                                           ram_bytes, BTRFS_COMPRESS_NONE,
1538                                           BTRFS_ORDERED_PREALLOC);
1539                         if (IS_ERR(em)) {
1540                                 if (nocow)
1541                                         btrfs_dec_nocow_writers(fs_info,
1542                                                                 disk_bytenr);
1543                                 ret = PTR_ERR(em);
1544                                 goto error;
1545                         }
1546                         free_extent_map(em);
1547                 }
1548
1549                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1550                         type = BTRFS_ORDERED_PREALLOC;
1551                 } else {
1552                         type = BTRFS_ORDERED_NOCOW;
1553                 }
1554
1555                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1556                                                num_bytes, num_bytes, type);
1557                 if (nocow)
1558                         btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1559                 BUG_ON(ret); /* -ENOMEM */
1560
1561                 if (root->root_key.objectid ==
1562                     BTRFS_DATA_RELOC_TREE_OBJECTID)
1563                         /*
1564                          * Error handled later, as we must prevent
1565                          * extent_clear_unlock_delalloc() in error handler
1566                          * from freeing metadata of created ordered extent.
1567                          */
1568                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1569                                                       num_bytes);
1570
1571                 extent_clear_unlock_delalloc(inode, cur_offset,
1572                                              cur_offset + num_bytes - 1, end,
1573                                              locked_page, EXTENT_LOCKED |
1574                                              EXTENT_DELALLOC |
1575                                              EXTENT_CLEAR_DATA_RESV,
1576                                              PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1577
1578                 cur_offset = extent_end;
1579
1580                 /*
1581                  * btrfs_reloc_clone_csums() error, now we're OK to call error
1582                  * handler, as metadata for created ordered extent will only
1583                  * be freed by btrfs_finish_ordered_io().
1584                  */
1585                 if (ret)
1586                         goto error;
1587                 if (cur_offset > end)
1588                         break;
1589         }
1590         btrfs_release_path(path);
1591
1592         if (cur_offset <= end && cow_start == (u64)-1)
1593                 cow_start = cur_offset;
1594
1595         if (cow_start != (u64)-1) {
1596                 cur_offset = end;
1597                 ret = cow_file_range(inode, locked_page, cow_start, end, end,
1598                                      page_started, nr_written, 1, NULL);
1599                 if (ret)
1600                         goto error;
1601         }
1602
1603 error:
1604         if (ret && cur_offset < end)
1605                 extent_clear_unlock_delalloc(inode, cur_offset, end, end,
1606                                              locked_page, EXTENT_LOCKED |
1607                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1608                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1609                                              PAGE_CLEAR_DIRTY |
1610                                              PAGE_SET_WRITEBACK |
1611                                              PAGE_END_WRITEBACK);
1612         btrfs_free_path(path);
1613         return ret;
1614 }
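/*
 * The loop above, reduced to its batching skeleton (a simplified sketch:
 * "extent_is_nocow_able" stands in for the whole chain of per-extent checks,
 * and error handling is omitted).  Ranges that must be COWed are not
 * submitted one extent at a time; they accumulate from cow_start and are
 * flushed with a single cow_file_range() call once a nocow-able extent, or
 * the end of the range, is reached:
 *
 *	cow_start = (u64)-1;
 *	while (cur_offset <= end) {
 *		if (!extent_is_nocow_able) {
 *			if (cow_start == (u64)-1)
 *				cow_start = cur_offset;
 *			cur_offset = extent_end;
 *			continue;
 *		}
 *		if (cow_start != (u64)-1) {
 *			cow_file_range(... cow_start, found_key.offset - 1 ...);
 *			cow_start = (u64)-1;
 *		}
 *		(create a NOCOW/PREALLOC ordered extent for this piece)
 *		cur_offset = extent_end;
 *	}
 *	if (cow_start != (u64)-1)
 *		cow_file_range(... cow_start, end ...);
 */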
1615
1616 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1617 {
1618
1619         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1620             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1621                 return 0;
1622
1623         /*
1624          * @defrag_bytes is a hint value; no spinlock is held here.
1625          * If it is non-zero, the file is being defragged.
1626          * Force COW if the given range needs to be defragged.
1627          */
1628         if (BTRFS_I(inode)->defrag_bytes &&
1629             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1630                            EXTENT_DEFRAG, 0, NULL))
1631                 return 1;
1632
1633         return 0;
1634 }
1635
1636 /*
1637  * Function to process delayed allocation (create CoW) for ranges which are
1638  * being touched for the first time.
1639  */
1640 int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
1641                 u64 start, u64 end, int *page_started, unsigned long *nr_written,
1642                 struct writeback_control *wbc)
1643 {
1644         struct inode *inode = private_data;
1645         int ret;
1646         int force_cow = need_force_cow(inode, start, end);
1647         unsigned int write_flags = wbc_to_write_flags(wbc);
1648
1649         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1650                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1651                                          page_started, 1, nr_written);
1652         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1653                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1654                                          page_started, 0, nr_written);
1655         } else if (!inode_can_compress(inode) ||
1656                    !inode_need_compress(inode, start, end)) {
1657                 ret = cow_file_range(inode, locked_page, start, end, end,
1658                                       page_started, nr_written, 1, NULL);
1659         } else {
1660                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1661                         &BTRFS_I(inode)->runtime_flags);
1662                 ret = cow_file_range_async(inode, locked_page, start, end,
1663                                            page_started, nr_written,
1664                                            write_flags);
1665         }
1666         if (ret)
1667                 btrfs_cleanup_ordered_extents(inode, locked_page, start,
1668                                               end - start + 1);
1669         return ret;
1670 }
1671
1672 static void btrfs_split_extent_hook(void *private_data,
1673                                     struct extent_state *orig, u64 split)
1674 {
1675         struct inode *inode = private_data;
1676         u64 size;
1677
1678         /* not delalloc, ignore it */
1679         if (!(orig->state & EXTENT_DELALLOC))
1680                 return;
1681
1682         size = orig->end - orig->start + 1;
1683         if (size > BTRFS_MAX_EXTENT_SIZE) {
1684                 u32 num_extents;
1685                 u64 new_size;
1686
1687                 /*
1688                  * See the explanation in btrfs_merge_extent_hook, the same
1689                  * applies here, just in reverse.
1690                  */
1691                 new_size = orig->end - split + 1;
1692                 num_extents = count_max_extents(new_size);
1693                 new_size = split - orig->start;
1694                 num_extents += count_max_extents(new_size);
1695                 if (count_max_extents(size) >= num_extents)
1696                         return;
1697         }
1698
1699         spin_lock(&BTRFS_I(inode)->lock);
1700         btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1701         spin_unlock(&BTRFS_I(inode)->lock);
1702 }
1703
1704 /*
1705  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1706  * extents.  It notices when new extents are merged onto old ones, such as
1707  * during sequential writes, so we can properly account for the metadata
1708  * space we'll need.
1709  */
1710 static void btrfs_merge_extent_hook(void *private_data,
1711                                     struct extent_state *new,
1712                                     struct extent_state *other)
1713 {
1714         struct inode *inode = private_data;
1715         u64 new_size, old_size;
1716         u32 num_extents;
1717
1718         /* not delalloc, ignore it */
1719         if (!(other->state & EXTENT_DELALLOC))
1720                 return;
1721
1722         if (new->start > other->start)
1723                 new_size = new->end - other->start + 1;
1724         else
1725                 new_size = other->end - new->start + 1;
1726
1727         /* we're not bigger than the max, unreserve the space and go */
1728         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1729                 spin_lock(&BTRFS_I(inode)->lock);
1730                 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1731                 spin_unlock(&BTRFS_I(inode)->lock);
1732                 return;
1733         }
1734
1735         /*
1736          * We have to add up either side to figure out how many extents were
1737          * accounted for before we merged into one big extent.  If the number of
1738          * extents we accounted for is <= the amount we need for the new range
1739          * then we can return, otherwise drop.  Think of it like this
1740          *
1741          * [ 4k][MAX_SIZE]
1742          *
1743          * So we've grown the extent by a MAX_SIZE extent.  This means we
1744          * need 2 outstanding extents; one side accounts for 1 and the other
1745          * side for 1, so they are equal and we can return.  But in this case
1746          *
1747          * [MAX_SIZE+4k][MAX_SIZE+4k]
1748          *
1749          * Each range on its own accounts for 2 extents, but merged together
1750          * they are only 3 extents' worth of accounting, so we need to drop one
1751          * in this case.
1752          */
1753         old_size = other->end - other->start + 1;
1754         num_extents = count_max_extents(old_size);
1755         old_size = new->end - new->start + 1;
1756         num_extents += count_max_extents(old_size);
1757         if (count_max_extents(new_size) >= num_extents)
1758                 return;
1759
1760         spin_lock(&BTRFS_I(inode)->lock);
1761         btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1762         spin_unlock(&BTRFS_I(inode)->lock);
1763 }
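/*
 * Putting numbers on the comment above, assuming BTRFS_MAX_EXTENT_SIZE is
 * 128M and count_max_extents() rounds a byte count up to a number of
 * max-sized extents:
 *
 *	[4K][MAX_SIZE]:             1 + 1 = 2 accounted; the merged range
 *	                            needs count_max_extents(128M + 4K) = 2,
 *	                            so nothing is dropped.
 *	[MAX_SIZE+4K][MAX_SIZE+4K]: 2 + 2 = 4 accounted; the merged range
 *	                            needs count_max_extents(256M + 8K) = 3,
 *	                            so one outstanding extent is released
 *	                            via btrfs_mod_outstanding_extents(..., -1).
 */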
1764
1765 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1766                                       struct inode *inode)
1767 {
1768         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1769
1770         spin_lock(&root->delalloc_lock);
1771         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1772                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1773                               &root->delalloc_inodes);
1774                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1775                         &BTRFS_I(inode)->runtime_flags);
1776                 root->nr_delalloc_inodes++;
1777                 if (root->nr_delalloc_inodes == 1) {
1778                         spin_lock(&fs_info->delalloc_root_lock);
1779                         BUG_ON(!list_empty(&root->delalloc_root));
1780                         list_add_tail(&root->delalloc_root,
1781                                       &fs_info->delalloc_roots);
1782                         spin_unlock(&fs_info->delalloc_root_lock);
1783                 }
1784         }
1785         spin_unlock(&root->delalloc_lock);
1786 }
1787
1788
1789 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
1790                                 struct btrfs_inode *inode)
1791 {
1792         struct btrfs_fs_info *fs_info = root->fs_info;
1793
1794         if (!list_empty(&inode->delalloc_inodes)) {
1795                 list_del_init(&inode->delalloc_inodes);
1796                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1797                           &inode->runtime_flags);
1798                 root->nr_delalloc_inodes--;
1799                 if (!root->nr_delalloc_inodes) {
1800                         ASSERT(list_empty(&root->delalloc_inodes));
1801                         spin_lock(&fs_info->delalloc_root_lock);
1802                         BUG_ON(list_empty(&root->delalloc_root));
1803                         list_del_init(&root->delalloc_root);
1804                         spin_unlock(&fs_info->delalloc_root_lock);
1805                 }
1806         }
1807 }
1808
1809 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1810                                      struct btrfs_inode *inode)
1811 {
1812         spin_lock(&root->delalloc_lock);
1813         __btrfs_del_delalloc_inode(root, inode);
1814         spin_unlock(&root->delalloc_lock);
1815 }
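/*
 * Lock ordering for the delalloc inode tracking above, as used by the
 * set/clear bit hooks below:
 *
 *	BTRFS_I(inode)->lock
 *	  -> root->delalloc_lock
 *	       -> fs_info->delalloc_root_lock
 *
 * Both helpers are called with the inode spinlock held and take the
 * per-root lock to edit the root's inode list; the fs-wide lock is only
 * taken on the 0 <-> 1 transitions of root->nr_delalloc_inodes, when the
 * root itself joins or leaves fs_info->delalloc_roots.
 */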
1816
1817 /*
1818  * extent_io.c set_bit_hook, used to track delayed allocation
1819  * bytes in this file, and to maintain the list of inodes that
1820  * have pending delalloc work to be done.
1821  */
1822 static void btrfs_set_bit_hook(void *private_data,
1823                                struct extent_state *state, unsigned *bits)
1824 {
1825         struct inode *inode = private_data;
1826
1827         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1828
1829         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1830                 WARN_ON(1);
1831         /*
1832  * set_bit and clear_bit hooks normally require _irqsave/restore
1833          * but in this case, we are only testing for the DELALLOC
1834          * bit, which is only set or cleared with irqs on
1835          */
1836         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1837                 struct btrfs_root *root = BTRFS_I(inode)->root;
1838                 u64 len = state->end + 1 - state->start;
1839                 u32 num_extents = count_max_extents(len);
1840                 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
1841
1842                 spin_lock(&BTRFS_I(inode)->lock);
1843                 btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
1844                 spin_unlock(&BTRFS_I(inode)->lock);
1845
1846                 /* For sanity tests */
1847                 if (btrfs_is_testing(fs_info))
1848                         return;
1849
1850                 percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
1851                                          fs_info->delalloc_batch);
1852                 spin_lock(&BTRFS_I(inode)->lock);
1853                 BTRFS_I(inode)->delalloc_bytes += len;
1854                 if (*bits & EXTENT_DEFRAG)
1855                         BTRFS_I(inode)->defrag_bytes += len;
1856                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1857                                          &BTRFS_I(inode)->runtime_flags))
1858                         btrfs_add_delalloc_inodes(root, inode);
1859                 spin_unlock(&BTRFS_I(inode)->lock);
1860         }
1861
1862         if (!(state->state & EXTENT_DELALLOC_NEW) &&
1863             (*bits & EXTENT_DELALLOC_NEW)) {
1864                 spin_lock(&BTRFS_I(inode)->lock);
1865                 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
1866                         state->start;
1867                 spin_unlock(&BTRFS_I(inode)->lock);
1868         }
1869 }
1870
1871 /*
1872  * extent_io.c clear_bit_hook, see set_bit_hook for why
1873  */
1874 static void btrfs_clear_bit_hook(void *private_data,
1875                                  struct extent_state *state,
1876                                  unsigned *bits)
1877 {
1878         struct btrfs_inode *inode = BTRFS_I((struct inode *)private_data);
1879         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1880         u64 len = state->end + 1 - state->start;
1881         u32 num_extents = count_max_extents(len);
1882
1883         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
1884                 spin_lock(&inode->lock);
1885                 inode->defrag_bytes -= len;
1886                 spin_unlock(&inode->lock);
1887         }
1888
1889         /*
1890  * set_bit and clear_bit hooks normally require _irqsave/restore
1891          * but in this case, we are only testing for the DELALLOC
1892          * bit, which is only set or cleared with irqs on
1893          */
1894         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1895                 struct btrfs_root *root = inode->root;
1896                 bool do_list = !btrfs_is_free_space_inode(inode);
1897
1898                 spin_lock(&inode->lock);
1899                 btrfs_mod_outstanding_extents(inode, -num_extents);
1900                 spin_unlock(&inode->lock);
1901
1902                 /*
1903                  * We don't reserve metadata space for space cache inodes, so we
1904                  * don't need to call btrfs_delalloc_release_metadata if there is
1905                  * an error.
1906                  */
1907                 if (*bits & EXTENT_CLEAR_META_RESV &&
1908                     root != fs_info->tree_root)
1909                         btrfs_delalloc_release_metadata(inode, len, false);
1910
1911                 /* For sanity tests. */
1912                 if (btrfs_is_testing(fs_info))
1913                         return;
1914
1915                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
1916                     do_list && !(state->state & EXTENT_NORESERVE) &&
1917                     (*bits & EXTENT_CLEAR_DATA_RESV))
1918                         btrfs_free_reserved_data_space_noquota(
1919                                         &inode->vfs_inode,
1920                                         state->start, len);
1921
1922                 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
1923                                          fs_info->delalloc_batch);
1924                 spin_lock(&inode->lock);
1925                 inode->delalloc_bytes -= len;
1926                 if (do_list && inode->delalloc_bytes == 0 &&
1927                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1928                                         &inode->runtime_flags))
1929                         btrfs_del_delalloc_inode(root, inode);
1930                 spin_unlock(&inode->lock);
1931         }
1932
1933         if ((state->state & EXTENT_DELALLOC_NEW) &&
1934             (*bits & EXTENT_DELALLOC_NEW)) {
1935                 spin_lock(&inode->lock);
1936                 ASSERT(inode->new_delalloc_bytes >= len);
1937                 inode->new_delalloc_bytes -= len;
1938                 spin_unlock(&inode->lock);
1939         }
1940 }
1941
1942 /*
1943  * Merge bio hook, this must check the chunk tree to make sure we don't create
1944  * bios that span stripes or chunks
1945  *
1946  * return 1 if the page cannot be merged into the bio
1947  * return 0 if the page can be merged into the bio
1948  * return error otherwise
1949  */
1950 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1951                          size_t size, struct bio *bio,
1952                          unsigned long bio_flags)
1953 {
1954         struct inode *inode = page->mapping->host;
1955         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1956         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1957         u64 length = 0;
1958         u64 map_length;
1959         int ret;
1960
1961         if (bio_flags & EXTENT_BIO_COMPRESSED)
1962                 return 0;
1963
1964         length = bio->bi_iter.bi_size;
1965         map_length = length;
1966         ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
1967                               NULL, 0);
1968         if (ret < 0)
1969                 return ret;
1970         if (map_length < length + size)
1971                 return 1;
1972         return 0;
1973 }
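/*
 * A worked example of the check above, with made-up numbers: suppose the
 * bio already holds length = 60K and btrfs_map_block() reports that
 * map_length = 64K of contiguous stripe remain at the bio's logical start.
 * Merging one more 4K page gives length + size = 64K, which is not greater
 * than map_length, so the page is accepted (return 0).  A further 4K page
 * would require 68K > 64K, so it is rejected (return 1) and the caller
 * must start a new bio at the stripe boundary.
 */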
1974
1975 /*
1976  * In order to insert checksums into the metadata in large chunks,
1977  * we wait until bio submission time.  All the pages in the bio are
1978  * checksummed and the sums are attached onto the ordered extent record.
1979  *
1980  * At IO completion time the csums attached to the ordered extent record
1981  * are inserted into the btree.
1982  */
1983 static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
1984                                     u64 bio_offset)
1985 {
1986         struct inode *inode = private_data;
1987         blk_status_t ret = 0;
1988
1989         ret = btrfs_csum_one_bio(inode, bio, 0, 0);
1990         BUG_ON(ret); /* -ENOMEM */
1991         return 0;
1992 }
1993
1994 /*
1995  * The async submission half of the write path above: by the time this
1996  * runs, the bio has already been checksummed by btrfs_submit_bio_start()
1997  * on the work queue, so all that is left is to map the bio to the device
1998  * stripes and submit it.
1999  *
2000  * Any mapping error is pushed back into the bio and completed here.
2001  */
2002 blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
2003                           int mirror_num)
2004 {
2005         struct inode *inode = private_data;
2006         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2007         blk_status_t ret;
2008
2009         ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
2010         if (ret) {
2011                 bio->bi_status = ret;
2012                 bio_endio(bio);
2013         }
2014         return ret;
2015 }
2016
2017 /*
2018  * extent_io.c submission hook. This does the right thing for csum calculation
2019  * on write, or reading the csums from the tree before a read.
2020  *
2021  * Rules about async/sync submit,
2022  * a) read:                             sync submit
2023  *
2024  * b) write without checksum:           sync submit
2025  *
2026  * c) write with checksum:
2027  *    c-1) if bio is issued by fsync:   sync submit
2028  *         (sync_writers != 0)
2029  *
2030  *    c-2) if root is reloc root:       sync submit
2031  *         (only in case of buffered IO)
2032  *
2033  *    c-3) otherwise:                   async submit
2034  */
2035 static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
2036                                  int mirror_num, unsigned long bio_flags,
2037                                  u64 bio_offset)
2038 {
2039         struct inode *inode = private_data;
2040         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2041         struct btrfs_root *root = BTRFS_I(inode)->root;
2042         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
2043         blk_status_t ret = 0;
2044         int skip_sum;
2045         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
2046
2047         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
2048
2049         if (btrfs_is_free_space_inode(BTRFS_I(inode)))
2050                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
2051
2052         if (bio_op(bio) != REQ_OP_WRITE) {
2053                 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
2054                 if (ret)
2055                         goto out;
2056
2057                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
2058                         ret = btrfs_submit_compressed_read(inode, bio,
2059                                                            mirror_num,
2060                                                            bio_flags);
2061                         goto out;
2062                 } else if (!skip_sum) {
2063                         ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2064                         if (ret)
2065                                 goto out;
2066                 }
2067                 goto mapit;
2068         } else if (async && !skip_sum) {
2069                 /* csum items have already been cloned */
2070                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2071                         goto mapit;
2072                 /* we're doing a write, do the async checksumming */
2073                 ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
2074                                           bio_offset, inode,
2075                                           btrfs_submit_bio_start);
2076                 goto out;
2077         } else if (!skip_sum) {
2078                 ret = btrfs_csum_one_bio(inode, bio, 0, 0);
2079                 if (ret)
2080                         goto out;
2081         }
2082
2083 mapit:
2084         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
2085
2086 out:
2087         if (ret) {
2088                 bio->bi_status = ret;
2089                 bio_endio(bio);
2090         }
2091         return ret;
2092 }
2093
2094 /*
2095  * Given a list of ordered sums, record them in the inode.  This happens
2096  * at IO completion time based on sums calculated at bio submission time.
2097  */
2098 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
2099                              struct inode *inode, struct list_head *list)
2100 {
2101         struct btrfs_ordered_sum *sum;
2102         int ret;
2103
2104         list_for_each_entry(sum, list, list) {
2105                 trans->adding_csums = true;
2106                 ret = btrfs_csum_file_blocks(trans,
2107                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
2108                 trans->adding_csums = false;
2109                 if (ret)
2110                         return ret;
2111         }
2112         return 0;
2113 }
2114
2115 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2116                               unsigned int extra_bits,
2117                               struct extent_state **cached_state, int dedupe)
2118 {
2119         WARN_ON((end & (PAGE_SIZE - 1)) == 0);
2120         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
2121                                    extra_bits, cached_state);
2122 }
2123
2124 /* see btrfs_writepage_start_hook for details on why this is required */
2125 struct btrfs_writepage_fixup {
2126         struct page *page;
2127         struct btrfs_work work;
2128 };
2129
2130 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2131 {
2132         struct btrfs_writepage_fixup *fixup;
2133         struct btrfs_ordered_extent *ordered;
2134         struct extent_state *cached_state = NULL;
2135         struct extent_changeset *data_reserved = NULL;
2136         struct page *page;
2137         struct inode *inode;
2138         u64 page_start;
2139         u64 page_end;
2140         int ret;
2141
2142         fixup = container_of(work, struct btrfs_writepage_fixup, work);
2143         page = fixup->page;
2144 again:
2145         lock_page(page);
2146         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2147                 ClearPageChecked(page);
2148                 goto out_page;
2149         }
2150
2151         inode = page->mapping->host;
2152         page_start = page_offset(page);
2153         page_end = page_offset(page) + PAGE_SIZE - 1;
2154
2155         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
2156                          &cached_state);
2157
2158         /* already ordered? We're done */
2159         if (PagePrivate2(page))
2160                 goto out;
2161
2162         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
2163                                         PAGE_SIZE);
2164         if (ordered) {
2165                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2166                                      page_end, &cached_state);
2167                 unlock_page(page);
2168                 btrfs_start_ordered_extent(inode, ordered, 1);
2169                 btrfs_put_ordered_extent(ordered);
2170                 goto again;
2171         }
2172
2173         ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2174                                            PAGE_SIZE);
2175         if (ret) {
2176                 mapping_set_error(page->mapping, ret);
2177                 end_extent_writepage(page, ret, page_start, page_end);
2178                 ClearPageChecked(page);
2179                 goto out;
2180         }
2181
2182         ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2183                                         &cached_state, 0);
2184         if (ret) {
2185                 mapping_set_error(page->mapping, ret);
2186                 end_extent_writepage(page, ret, page_start, page_end);
2187                 ClearPageChecked(page);
2188                 goto out_reserved;
2189         }
2190
2191         ClearPageChecked(page);
2192         set_page_dirty(page);
2193 out_reserved:
2194         btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
2195         if (ret)
2196                 btrfs_delalloc_release_space(inode, data_reserved, page_start,
2197                                              PAGE_SIZE, true);
2198 out:
2199         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2200                              &cached_state);
2201 out_page:
2202         unlock_page(page);
2203         put_page(page);
2204         kfree(fixup);
2205         extent_changeset_free(data_reserved);
2206 }
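/*
 * The worker above follows the usual btrfs unlock-wait-retry pattern for
 * racing against an ordered extent (simplified; the reservation and error
 * paths are omitted):
 *
 *	again:
 *		lock_page(page);
 *		lock_extent_bits(io_tree, page_start, page_end, &cached);
 *		ordered = btrfs_lookup_ordered_range(...);
 *		if (ordered) {
 *			unlock_extent_cached(...);	(drop both locks)
 *			unlock_page(page);
 *			btrfs_start_ordered_extent(...);   (may block on IO)
 *			btrfs_put_ordered_extent(ordered);
 *			goto again;
 *		}
 *		(redirty the page while still holding both locks)
 *
 * Dropping the page and extent locks before waiting is what keeps the wait
 * deadlock-free: finishing the ordered extent takes the extent range lock
 * itself.
 */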
2207
2208 /*
2209  * There are a few paths in the higher layers of the kernel that directly
2210  * set the page dirty bit without asking the filesystem if it is a
2211  * good idea.  This causes problems because we want to make sure COW
2212  * properly happens and the data=ordered rules are followed.
2213  *
2214  * In our case any range that doesn't have the ORDERED bit set
2215  * hasn't been properly setup for IO.  We kick off an async process
2216  * to fix it up.  The async helper will wait for ordered extents, set
2217  * the delalloc bit and make it safe to write the page.
2218  */
2219 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2220 {
2221         struct inode *inode = page->mapping->host;
2222         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2223         struct btrfs_writepage_fixup *fixup;
2224
2225         /* this page is properly in the ordered list */
2226         if (TestClearPagePrivate2(page))
2227                 return 0;
2228
2229         if (PageChecked(page))
2230                 return -EAGAIN;
2231
2232         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2233         if (!fixup)
2234                 return -EAGAIN;
2235
2236         SetPageChecked(page);
2237         get_page(page);
2238         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2239                         btrfs_writepage_fixup_worker, NULL, NULL);
2240         fixup->page = page;
2241         btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2242         return -EBUSY;
2243 }
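/*
 * In btrfs_writepage_start_hook() above, PageChecked doubles as the "fixup
 * already queued" flag: it is set before queuing, tested on re-entry so a
 * page is never queued twice, and cleared again by the worker.  The
 * get_page() reference pins the page until the worker's put_page().  A
 * non-zero return tells the caller that writeback of this page must not
 * proceed yet: either a fixup was just queued (-EBUSY), or one is already
 * pending or could not be allocated (-EAGAIN).
 */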
2244
2245 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2246                                        struct inode *inode, u64 file_pos,
2247                                        u64 disk_bytenr, u64 disk_num_bytes,
2248                                        u64 num_bytes, u64 ram_bytes,
2249                                        u8 compression, u8 encryption,
2250                                        u16 other_encoding, int extent_type)
2251 {
2252         struct btrfs_root *root = BTRFS_I(inode)->root;
2253         struct btrfs_file_extent_item *fi;
2254         struct btrfs_path *path;
2255         struct extent_buffer *leaf;
2256         struct btrfs_key ins;
2257         u64 qg_released;
2258         int extent_inserted = 0;
2259         int ret;
2260
2261         path = btrfs_alloc_path();
2262         if (!path)
2263                 return -ENOMEM;
2264
2265         /*
2266          * we may be replacing one extent in the tree with another.
2267          * The new extent is pinned in the extent map, and we don't want
2268          * to drop it from the cache until it is completely in the btree.
2269          *
2270          * So, tell btrfs_drop_extents to leave this extent in the cache.
2271          * The caller is expected to unpin it and allow it to be merged
2272          * with the others.
2273          */
2274         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2275                                    file_pos + num_bytes, NULL, 0,
2276                                    1, sizeof(*fi), &extent_inserted);
2277         if (ret)
2278                 goto out;
2279
2280         if (!extent_inserted) {
2281                 ins.objectid = btrfs_ino(BTRFS_I(inode));
2282                 ins.offset = file_pos;
2283                 ins.type = BTRFS_EXTENT_DATA_KEY;
2284
2285                 path->leave_spinning = 1;
2286                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2287                                               sizeof(*fi));
2288                 if (ret)
2289                         goto out;
2290         }
2291         leaf = path->nodes[0];
2292         fi = btrfs_item_ptr(leaf, path->slots[0],
2293                             struct btrfs_file_extent_item);
2294         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2295         btrfs_set_file_extent_type(leaf, fi, extent_type);
2296         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2297         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2298         btrfs_set_file_extent_offset(leaf, fi, 0);
2299         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2300         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2301         btrfs_set_file_extent_compression(leaf, fi, compression);
2302         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2303         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2304
2305         btrfs_mark_buffer_dirty(leaf);
2306         btrfs_release_path(path);
2307
2308         inode_add_bytes(inode, num_bytes);
2309
2310         ins.objectid = disk_bytenr;
2311         ins.offset = disk_num_bytes;
2312         ins.type = BTRFS_EXTENT_ITEM_KEY;
2313
2314         /*
2315          * Release the reserved range from inode dirty range map, as it is
2316          * already moved into delayed_ref_head
2317          */
2318         ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2319         if (ret < 0)
2320                 goto out;
2321         qg_released = ret;
2322         ret = btrfs_alloc_reserved_file_extent(trans, root,
2323                                                btrfs_ino(BTRFS_I(inode)),
2324                                                file_pos, qg_released, &ins);
2325 out:
2326         btrfs_free_path(path);
2327
2328         return ret;
2329 }
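/*
 * The file extent item fields set above, with example values: a 1M write
 * that compressed down to 256K on disk would produce a REG item with
 *
 *	disk_bytenr    = (allocated bytenr)  start of the on-disk extent
 *	disk_num_bytes = 256K                bytes occupied on disk
 *	offset         = 0                   slice offset into that extent
 *	num_bytes      = 1M                  bytes of the file it covers
 *	ram_bytes      = 1M                  uncompressed size
 *
 * btrfs_qgroup_release_data() then releases [file_pos, file_pos + ram_bytes)
 * from the inode's reserved-range map, and the number of bytes actually
 * released (qg_released) is what gets passed to
 * btrfs_alloc_reserved_file_extent() for the new extent's accounting.
 */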
2330
2331 /* snapshot-aware defrag */
2332 struct sa_defrag_extent_backref {
2333         struct rb_node node;
2334         struct old_sa_defrag_extent *old;
2335         u64 root_id;
2336         u64 inum;
2337         u64 file_pos;
2338         u64 extent_offset;
2339         u64 num_bytes;
2340         u64 generation;
2341 };
2342
2343 struct old_sa_defrag_extent {
2344         struct list_head list;
2345         struct new_sa_defrag_extent *new;
2346
2347         u64 extent_offset;
2348         u64 bytenr;
2349         u64 offset;
2350         u64 len;
2351         int count;
2352 };
2353
2354 struct new_sa_defrag_extent {
2355         struct rb_root root;
2356         struct list_head head;
2357         struct btrfs_path *path;
2358         struct inode *inode;
2359         u64 file_pos;
2360         u64 len;
2361         u64 bytenr;
2362         u64 disk_len;
2363         u8 compress_type;
2364 };
2365
2366 static int backref_comp(struct sa_defrag_extent_backref *b1,
2367                         struct sa_defrag_extent_backref *b2)
2368 {
2369         if (b1->root_id < b2->root_id)
2370                 return -1;
2371         else if (b1->root_id > b2->root_id)
2372                 return 1;
2373
2374         if (b1->inum < b2->inum)
2375                 return -1;
2376         else if (b1->inum > b2->inum)
2377                 return 1;
2378
2379         if (b1->file_pos < b2->file_pos)
2380                 return -1;
2381         else if (b1->file_pos > b2->file_pos)
2382                 return 1;
2383
2384         /*
2385          * [------------------------------] ===> (a range of space)
2386          *     |<--->|   |<---->| =============> (fs/file tree A)
2387          * |<---------------------------->| ===> (fs/file tree B)
2388          *
2389          * A range of space can refer to two file extents in one tree while
2390          * refer to only one file extent in another tree.
2391          *
2392          * So we may process a disk offset more than once (two extents in A)
2393          * that lands in the same extent (one extent in B), and then insert
2394          * two identical backrefs (both referring to the extent in B).
2395          */
2396         return 0;
2397 }
2398
2399 static void backref_insert(struct rb_root *root,
2400                            struct sa_defrag_extent_backref *backref)
2401 {
2402         struct rb_node **p = &root->rb_node;
2403         struct rb_node *parent = NULL;
2404         struct sa_defrag_extent_backref *entry;
2405         int ret;
2406
2407         while (*p) {
2408                 parent = *p;
2409                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2410
2411                 ret = backref_comp(backref, entry);
2412                 if (ret < 0)
2413                         p = &(*p)->rb_left;
2414                 else
2415                         p = &(*p)->rb_right;
2416         }
2417
2418         rb_link_node(&backref->node, parent, p);
2419         rb_insert_color(&backref->node, root);
2420 }
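/*
 * Note that backref_insert() has no "equal" case: when backref_comp()
 * returns 0 the new node is simply linked to the right, so the duplicate
 * backrefs described above are kept side by side in the tree instead of
 * being rejected, and later passes must tolerate seeing the same backref
 * twice.
 */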
2421
2422 /*
2423  * Note the backref might have changed, and in that case we just return 0.
2424  */
2425 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2426                                        void *ctx)
2427 {
2428         struct btrfs_file_extent_item *extent;
2429         struct old_sa_defrag_extent *old = ctx;
2430         struct new_sa_defrag_extent *new = old->new;
2431         struct btrfs_path *path = new->path;
2432         struct btrfs_key key;
2433         struct btrfs_root *root;
2434         struct sa_defrag_extent_backref *backref;
2435         struct extent_buffer *leaf;
2436         struct inode *inode = new->inode;
2437         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2438         int slot;
2439         int ret;
2440         u64 extent_offset;
2441         u64 num_bytes;
2442
2443         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2444             inum == btrfs_ino(BTRFS_I(inode)))
2445                 return 0;
2446
2447         key.objectid = root_id;
2448         key.type = BTRFS_ROOT_ITEM_KEY;
2449         key.offset = (u64)-1;
2450
2451         root = btrfs_read_fs_root_no_name(fs_info, &key);
2452         if (IS_ERR(root)) {
2453                 if (PTR_ERR(root) == -ENOENT)
2454                         return 0;
2455                 WARN_ON(1);
2456                 btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
2457                          inum, offset, root_id);
2458                 return PTR_ERR(root);
2459         }
2460
2461         key.objectid = inum;
2462         key.type = BTRFS_EXTENT_DATA_KEY;
2463         if (offset > (u64)-1 << 32)
2464                 key.offset = 0;
2465         else
2466                 key.offset = offset;
2467
2468         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2469         if (WARN_ON(ret < 0))
2470                 return ret;
2471         ret = 0;
2472
2473         while (1) {
2474                 cond_resched();
2475
2476                 leaf = path->nodes[0];
2477                 slot = path->slots[0];
2478
2479                 if (slot >= btrfs_header_nritems(leaf)) {
2480                         ret = btrfs_next_leaf(root, path);
2481                         if (ret < 0) {
2482                                 goto out;
2483                         } else if (ret > 0) {
2484                                 ret = 0;
2485                                 goto out;
2486                         }
2487                         continue;
2488                 }
2489
2490                 path->slots[0]++;
2491
2492                 btrfs_item_key_to_cpu(leaf, &key, slot);
2493
2494                 if (key.objectid > inum)
2495                         goto out;
2496
2497                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2498                         continue;
2499
2500                 extent = btrfs_item_ptr(leaf, slot,
2501                                         struct btrfs_file_extent_item);
2502
2503                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2504                         continue;
2505
2506                 /*
2507                  * 'offset' refers to the exact key.offset,
2508                  * NOT the 'offset' field in btrfs_extent_data_ref, i.e.
2509                  * (key.offset - extent_offset).
2510                  */
2511                 if (key.offset != offset)
2512                         continue;
2513
2514                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2515                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2516
2517                 if (extent_offset >= old->extent_offset + old->offset +
2518                     old->len || extent_offset + num_bytes <=
2519                     old->extent_offset + old->offset)
2520                         continue;
2521                 break;
2522         }
2523
2524         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2525         if (!backref) {
2526                 ret = -ENOENT; /* caller treats -ENOENT as non-fatal */
2527                 goto out;
2528         }
2529
2530         backref->root_id = root_id;
2531         backref->inum = inum;
2532         backref->file_pos = offset;
2533         backref->num_bytes = num_bytes;
2534         backref->extent_offset = extent_offset;
2535         backref->generation = btrfs_file_extent_generation(leaf, extent);
2536         backref->old = old;
2537         backref_insert(&new->root, backref);
2538         old->count++;
2539 out:
2540         btrfs_release_path(path);
2541         WARN_ON(ret);
2542         return ret;
2543 }
2544
2545 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2546                                    struct new_sa_defrag_extent *new)
2547 {
2548         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2549         struct old_sa_defrag_extent *old, *tmp;
2550         int ret;
2551
2552         new->path = path;
2553
2554         list_for_each_entry_safe(old, tmp, &new->head, list) {
2555                 ret = iterate_inodes_from_logical(old->bytenr +
2556                                                   old->extent_offset, fs_info,
2557                                                   path, record_one_backref,
2558                                                   old, false);
2559                 if (ret < 0 && ret != -ENOENT)
2560                         return false;
2561
2562                 /* no backref to be processed for this extent */
2563                 if (!old->count) {
2564                         list_del(&old->list);
2565                         kfree(old);
2566                 }
2567         }
2568
2569         if (list_empty(&new->head))
2570                 return false;
2571
2572         return true;
2573 }
2574
2575 static int relink_is_mergable(struct extent_buffer *leaf,
2576                               struct btrfs_file_extent_item *fi,
2577                               struct new_sa_defrag_extent *new)
2578 {
2579         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2580                 return 0;
2581
2582         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2583                 return 0;
2584
2585         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2586                 return 0;
2587
2588         if (btrfs_file_extent_encryption(leaf, fi) ||
2589             btrfs_file_extent_other_encoding(leaf, fi))
2590                 return 0;
2591
2592         return 1;
2593 }
2594
2595 /*
2596  * Note the backref may have changed, and in this case we just return 0.
2597  */
2598 static noinline int relink_extent_backref(struct btrfs_path *path,
2599                                  struct sa_defrag_extent_backref *prev,
2600                                  struct sa_defrag_extent_backref *backref)
2601 {
2602         struct btrfs_file_extent_item *extent;
2603         struct btrfs_file_extent_item *item;
2604         struct btrfs_ordered_extent *ordered;
2605         struct btrfs_trans_handle *trans;
2606         struct btrfs_root *root;
2607         struct btrfs_key key;
2608         struct extent_buffer *leaf;
2609         struct old_sa_defrag_extent *old = backref->old;
2610         struct new_sa_defrag_extent *new = old->new;
2611         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2612         struct inode *inode;
2613         struct extent_state *cached = NULL;
2614         int ret = 0;
2615         u64 start;
2616         u64 len;
2617         u64 lock_start;
2618         u64 lock_end;
2619         bool merge = false;
2620         int index;
2621
2622         if (prev && prev->root_id == backref->root_id &&
2623             prev->inum == backref->inum &&
2624             prev->file_pos + prev->num_bytes == backref->file_pos)
2625                 merge = true;
2626
2627         /* step 1: get root */
2628         key.objectid = backref->root_id;
2629         key.type = BTRFS_ROOT_ITEM_KEY;
2630         key.offset = (u64)-1;
2631
2632         index = srcu_read_lock(&fs_info->subvol_srcu);
2633
2634         root = btrfs_read_fs_root_no_name(fs_info, &key);
2635         if (IS_ERR(root)) {
2636                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2637                 if (PTR_ERR(root) == -ENOENT)
2638                         return 0;
2639                 return PTR_ERR(root);
2640         }
2641
2642         if (btrfs_root_readonly(root)) {
2643                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2644                 return 0;
2645         }
2646
2647         /* step 2: get inode */
2648         key.objectid = backref->inum;
2649         key.type = BTRFS_INODE_ITEM_KEY;
2650         key.offset = 0;
2651
2652         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2653         if (IS_ERR(inode)) {
2654                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2655                 return 0;
2656         }
2657
2658         srcu_read_unlock(&fs_info->subvol_srcu, index);
2659
2660         /* step 3: relink backref */
2661         lock_start = backref->file_pos;
2662         lock_end = backref->file_pos + backref->num_bytes - 1;
2663         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2664                          &cached);
2665
2666         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2667         if (ordered) {
2668                 btrfs_put_ordered_extent(ordered);
2669                 goto out_unlock;
2670         }
2671
2672         trans = btrfs_join_transaction(root);
2673         if (IS_ERR(trans)) {
2674                 ret = PTR_ERR(trans);
2675                 goto out_unlock;
2676         }
2677
2678         key.objectid = backref->inum;
2679         key.type = BTRFS_EXTENT_DATA_KEY;
2680         key.offset = backref->file_pos;
2681
2682         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2683         if (ret < 0) {
2684                 goto out_free_path;
2685         } else if (ret > 0) {
2686                 ret = 0;
2687                 goto out_free_path;
2688         }
2689
2690         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2691                                 struct btrfs_file_extent_item);
2692
2693         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2694             backref->generation)
2695                 goto out_free_path;
2696
2697         btrfs_release_path(path);
2698
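             /*
              * Compute the file range to relink: the intersection of this
              * backref's extent range with the defragged part of the old
              * extent, translated back into file offsets.  'start' moves
              * forward when the backref begins before the defragged part,
              * and 'len' is the size of the overlap.
              */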
2699         start = backref->file_pos;
2700         if (backref->extent_offset < old->extent_offset + old->offset)
2701                 start += old->extent_offset + old->offset -
2702                          backref->extent_offset;
2703
2704         len = min(backref->extent_offset + backref->num_bytes,
2705                   old->extent_offset + old->offset + old->len);
2706         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2707
2708         ret = btrfs_drop_extents(trans, root, inode, start,
2709                                  start + len, 1);
2710         if (ret)
2711                 goto out_free_path;
2712 again:
2713         key.objectid = btrfs_ino(BTRFS_I(inode));
2714         key.type = BTRFS_EXTENT_DATA_KEY;
2715         key.offset = start;
2716
2717         path->leave_spinning = 1;
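             /*
              * If this backref immediately follows the previously relinked
              * one in the same file, try to extend that file extent item in
              * place instead of inserting a new item.
              */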
2718         if (merge) {
2719                 struct btrfs_file_extent_item *fi;
2720                 u64 extent_len;
2721                 struct btrfs_key found_key;
2722
2723                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2724                 if (ret < 0)
2725                         goto out_free_path;
2726
2727                 path->slots[0]--;
2728                 leaf = path->nodes[0];
2729                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2730
2731                 fi = btrfs_item_ptr(leaf, path->slots[0],
2732                                     struct btrfs_file_extent_item);
2733                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2734
2735                 if (extent_len + found_key.offset == start &&
2736                     relink_is_mergable(leaf, fi, new)) {
2737                         btrfs_set_file_extent_num_bytes(leaf, fi,
2738                                                         extent_len + len);
2739                         btrfs_mark_buffer_dirty(leaf);
2740                         inode_add_bytes(inode, len);
2741
2742                         ret = 1;
2743                         goto out_free_path;
2744                 } else {
2745                         merge = false;
2746                         btrfs_release_path(path);
2747                         goto again;
2748                 }
2749         }
2750
2751         ret = btrfs_insert_empty_item(trans, root, path, &key,
2752                                         sizeof(*extent));
2753         if (ret) {
2754                 btrfs_abort_transaction(trans, ret);
2755                 goto out_free_path;
2756         }
2757
2758         leaf = path->nodes[0];
2759         item = btrfs_item_ptr(leaf, path->slots[0],
2760                                 struct btrfs_file_extent_item);
2761         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2762         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2763         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2764         btrfs_set_file_extent_num_bytes(leaf, item, len);
2765         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2766         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2767         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2768         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2769         btrfs_set_file_extent_encryption(leaf, item, 0);
2770         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2771
2772         btrfs_mark_buffer_dirty(leaf);
2773         inode_add_bytes(inode, len);
2774         btrfs_release_path(path);
2775
2776         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2777                         new->disk_len, 0,
2778                         backref->root_id, backref->inum,
2779                         new->file_pos); /* start - extent_offset */
2780         if (ret) {
2781                 btrfs_abort_transaction(trans, ret);
2782                 goto out_free_path;
2783         }
2784
2785         ret = 1;
2786 out_free_path:
2787         btrfs_release_path(path);
2788         path->leave_spinning = 0;
2789         btrfs_end_transaction(trans);
2790 out_unlock:
2791         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2792                              &cached);
2793         iput(inode);
2794         return ret;
2795 }
2796
2797 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2798 {
2799         struct old_sa_defrag_extent *old, *tmp;
2800
2801         if (!new)
2802                 return;
2803
2804         list_for_each_entry_safe(old, tmp, &new->head, list) {
2805                 kfree(old);
2806         }
2807         kfree(new);
2808 }
2809
2810 static void relink_file_extents(struct new_sa_defrag_extent *new)
2811 {
2812         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2813         struct btrfs_path *path;
2814         struct sa_defrag_extent_backref *backref;
2815         struct sa_defrag_extent_backref *prev = NULL;
2816         struct inode *inode;
2817         struct rb_node *node;
2818         int ret;
2819
2820         inode = new->inode;
2821
2822         path = btrfs_alloc_path();
2823         if (!path)
2824                 return;
2825
2826         if (!record_extent_backrefs(path, new)) {
2827                 btrfs_free_path(path);
2828                 goto out;
2829         }
2830         btrfs_release_path(path);
2831
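             /*
              * Walk the backrefs in sorted order, keeping the previous
              * successfully relinked backref around so that a directly
              * adjacent one can be merged into the same file extent item.
              */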
2832         while (1) {
2833                 node = rb_first(&new->root);
2834                 if (!node)
2835                         break;
2836                 rb_erase(node, &new->root);
2837
2838                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2839
2840                 ret = relink_extent_backref(path, prev, backref);
2841                 WARN_ON(ret < 0);
2842
2843                 kfree(prev);
2844
2845                 if (ret == 1)
2846                         prev = backref;
2847                 else
2848                         prev = NULL;
2849                 cond_resched();
2850         }
2851         kfree(prev);
2852
2853         btrfs_free_path(path);
2854 out:
2855         free_sa_defrag_extent(new);
2856
2857         atomic_dec(&fs_info->defrag_running);
2858         wake_up(&fs_info->transaction_wait);
2859 }
2860
2861 static struct new_sa_defrag_extent *
2862 record_old_file_extents(struct inode *inode,
2863                         struct btrfs_ordered_extent *ordered)
2864 {
2865         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2866         struct btrfs_root *root = BTRFS_I(inode)->root;
2867         struct btrfs_path *path;
2868         struct btrfs_key key;
2869         struct old_sa_defrag_extent *old;
2870         struct new_sa_defrag_extent *new;
2871         int ret;
2872
2873         new = kmalloc(sizeof(*new), GFP_NOFS);
2874         if (!new)
2875                 return NULL;
2876
2877         new->inode = inode;
2878         new->file_pos = ordered->file_offset;
2879         new->len = ordered->len;
2880         new->bytenr = ordered->start;
2881         new->disk_len = ordered->disk_len;
2882         new->compress_type = ordered->compress_type;
2883         new->root = RB_ROOT;
2884         INIT_LIST_HEAD(&new->head);
2885
2886         path = btrfs_alloc_path();
2887         if (!path)
2888                 goto out_kfree;
2889
2890         key.objectid = btrfs_ino(BTRFS_I(inode));
2891         key.type = BTRFS_EXTENT_DATA_KEY;
2892         key.offset = new->file_pos;
2893
2894         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2895         if (ret < 0)
2896                 goto out_free_path;
2897         if (ret > 0 && path->slots[0] > 0)
2898                 path->slots[0]--;
2899
2900         /* find out all the old extents for the file range */
2901         while (1) {
2902                 struct btrfs_file_extent_item *extent;
2903                 struct extent_buffer *l;
2904                 int slot;
2905                 u64 num_bytes;
2906                 u64 offset;
2907                 u64 end;
2908                 u64 disk_bytenr;
2909                 u64 extent_offset;
2910
2911                 l = path->nodes[0];
2912                 slot = path->slots[0];
2913
2914                 if (slot >= btrfs_header_nritems(l)) {
2915                         ret = btrfs_next_leaf(root, path);
2916                         if (ret < 0)
2917                                 goto out_free_path;
2918                         else if (ret > 0)
2919                                 break;
2920                         continue;
2921                 }
2922
2923                 btrfs_item_key_to_cpu(l, &key, slot);
2924
2925                 if (key.objectid != btrfs_ino(BTRFS_I(inode)))
2926                         break;
2927                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2928                         break;
2929                 if (key.offset >= new->file_pos + new->len)
2930                         break;
2931
2932                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2933
2934                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2935                 if (key.offset + num_bytes < new->file_pos)
2936                         goto next;
2937
2938                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2939                 if (!disk_bytenr)
2940                         goto next;
2941
2942                 extent_offset = btrfs_file_extent_offset(l, extent);
2943
2944                 old = kmalloc(sizeof(*old), GFP_NOFS);
2945                 if (!old)
2946                         goto out_free_path;
2947
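                     /*
                      * Clamp this extent to the part that overlaps the file
                      * range covered by the new (defragged) extent.
                      */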
2948                 offset = max(new->file_pos, key.offset);
2949                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2950
2951                 old->bytenr = disk_bytenr;
2952                 old->extent_offset = extent_offset;
2953                 old->offset = offset - key.offset;
2954                 old->len = end - offset;
2955                 old->new = new;
2956                 old->count = 0;
2957                 list_add_tail(&old->list, &new->head);
2958 next:
2959                 path->slots[0]++;
2960                 cond_resched();
2961         }
2962
2963         btrfs_free_path(path);
2964         atomic_inc(&fs_info->defrag_running);
2965
2966         return new;
2967
2968 out_free_path:
2969         btrfs_free_path(path);
2970 out_kfree:
2971         free_sa_defrag_extent(new);
2972         return NULL;
2973 }
2974
2975 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2976                                          u64 start, u64 len)
2977 {
2978         struct btrfs_block_group_cache *cache;
2979
2980         cache = btrfs_lookup_block_group(fs_info, start);
2981         ASSERT(cache);
2982
2983         spin_lock(&cache->lock);
2984         cache->delalloc_bytes -= len;
2985         spin_unlock(&cache->lock);
2986
2987         btrfs_put_block_group(cache);
2988 }
2989
2990 /* as ordered data IO finishes, this gets called so we can finish
2991  * an ordered extent if the range of bytes in the file it covers is
2992  * fully written.
2993  */
2994 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2995 {
2996         struct inode *inode = ordered_extent->inode;
2997         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2998         struct btrfs_root *root = BTRFS_I(inode)->root;
2999         struct btrfs_trans_handle *trans = NULL;
3000         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3001         struct extent_state *cached_state = NULL;
3002         struct new_sa_defrag_extent *new = NULL;
3003         int compress_type = 0;
3004         int ret = 0;
3005         u64 logical_len = ordered_extent->len;
3006         bool nolock;
3007         bool truncated = false;
3008         bool range_locked = false;
3009         bool clear_new_delalloc_bytes = false;
3010         bool clear_reserved_extent = true;
3011
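             /*
              * Buffered COW writes tag their range with EXTENT_DELALLOC_NEW
              * and need the bit cleared once the ordered extent completes;
              * NOCOW, preallocated and direct IO writes handle that
              * accounting along other paths.
              */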
3012         if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3013             !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3014             !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
3015                 clear_new_delalloc_bytes = true;
3016
3017         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
3018
3019         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3020                 ret = -EIO;
3021                 goto out;
3022         }
3023
3024         btrfs_free_io_failure_record(BTRFS_I(inode),
3025                         ordered_extent->file_offset,
3026                         ordered_extent->file_offset +
3027                         ordered_extent->len - 1);
3028
3029         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3030                 truncated = true;
3031                 logical_len = ordered_extent->truncated_len;
3032                 /* Truncated the entire extent, don't bother adding */
3033                 if (!logical_len)
3034                         goto out;
3035         }
3036
3037         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3038                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
3039
3040                 /*
3041                  * For the mwrite (mmap + memset to write) case, we still
3042                  * reserve space for the NOCOW range.  As NOCOW won't cause
3043                  * a new delayed ref, just free the space.
3044                  */
3045                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3046                                        ordered_extent->len);
3047                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3048                 if (nolock)
3049                         trans = btrfs_join_transaction_nolock(root);
3050                 else
3051                         trans = btrfs_join_transaction(root);
3052                 if (IS_ERR(trans)) {
3053                         ret = PTR_ERR(trans);
3054                         trans = NULL;
3055                         goto out;
3056                 }
3057                 trans->block_rsv = &BTRFS_I(inode)->block_rsv;
3058                 ret = btrfs_update_inode_fallback(trans, root, inode);
3059                 if (ret) /* -ENOMEM or corruption */
3060                         btrfs_abort_transaction(trans, ret);
3061                 goto out;
3062         }
3063
3064         range_locked = true;
3065         lock_extent_bits(io_tree, ordered_extent->file_offset,
3066                          ordered_extent->file_offset + ordered_extent->len - 1,
3067                          &cached_state);
3068
3069         ret = test_range_bit(io_tree, ordered_extent->file_offset,
3070                         ordered_extent->file_offset + ordered_extent->len - 1,
3071                         EXTENT_DEFRAG, 0, cached_state);
3072         if (ret) {
3073                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
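                     /*
                      * Snapshot-aware defrag is deliberately disabled: the
                      * "0 &&" short-circuits the check, so
                      * record_old_file_extents() is never called and the
                      * relink machinery stays unused.  The EXTENT_DEFRAG bit
                      * is still cleared below.
                      */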
3074                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
3075                         /* the inode is shared */
3076                         new = record_old_file_extents(inode, ordered_extent);
3077
3078                 clear_extent_bit(io_tree, ordered_extent->file_offset,
3079                         ordered_extent->file_offset + ordered_extent->len - 1,
3080                         EXTENT_DEFRAG, 0, 0, &cached_state);
3081         }
3082
3083         if (nolock)
3084                 trans = btrfs_join_transaction_nolock(root);
3085         else
3086                 trans = btrfs_join_transaction(root);
3087         if (IS_ERR(trans)) {
3088                 ret = PTR_ERR(trans);
3089                 trans = NULL;
3090                 goto out;
3091         }
3092
3093         trans->block_rsv = &BTRFS_I(inode)->block_rsv;
3094
3095         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3096                 compress_type = ordered_extent->compress_type;
3097         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3098                 BUG_ON(compress_type);
3099                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3100                                        ordered_extent->len);
3101                 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
3102                                                 ordered_extent->file_offset,
3103                                                 ordered_extent->file_offset +
3104                                                 logical_len);
3105         } else {
3106                 BUG_ON(root == fs_info->tree_root);
3107                 ret = insert_reserved_file_extent(trans, inode,
3108                                                 ordered_extent->file_offset,
3109                                                 ordered_extent->start,
3110                                                 ordered_extent->disk_len,
3111                                                 logical_len, logical_len,
3112                                                 compress_type, 0, 0,
3113                                                 BTRFS_FILE_EXTENT_REG);
3114                 if (!ret) {
3115                         clear_reserved_extent = false;
3116                         btrfs_release_delalloc_bytes(fs_info,
3117                                                      ordered_extent->start,
3118                                                      ordered_extent->disk_len);
3119                 }
3120         }
3121         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
3122                            ordered_extent->file_offset, ordered_extent->len,
3123                            trans->transid);
3124         if (ret < 0) {
3125                 btrfs_abort_transaction(trans, ret);
3126                 goto out;
3127         }
3128
3129         ret = add_pending_csums(trans, inode, &ordered_extent->list);
3130         if (ret) {
3131                 btrfs_abort_transaction(trans, ret);
3132                 goto out;
3133         }
3134
3135         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3136         ret = btrfs_update_inode_fallback(trans, root, inode);
3137         if (ret) { /* -ENOMEM or corruption */
3138                 btrfs_abort_transaction(trans, ret);
3139                 goto out;
3140         }
3141         ret = 0;
3142 out:
3143         if (range_locked || clear_new_delalloc_bytes) {
3144                 unsigned int clear_bits = 0;
3145
3146                 if (range_locked)
3147                         clear_bits |= EXTENT_LOCKED;
3148                 if (clear_new_delalloc_bytes)
3149                         clear_bits |= EXTENT_DELALLOC_NEW;
3150                 clear_extent_bit(&BTRFS_I(inode)->io_tree,
3151                                  ordered_extent->file_offset,
3152                                  ordered_extent->file_offset +
3153                                  ordered_extent->len - 1,
3154                                  clear_bits,
3155                                  (clear_bits & EXTENT_LOCKED) ? 1 : 0,
3156                                  0, &cached_state);
3157         }
3158
3159         if (trans)
3160                 btrfs_end_transaction(trans);
3161
3162         if (ret || truncated) {
3163                 u64 start, end;
3164
3165                 /*
3166                  * If we failed to finish this ordered extent for any reason we
3167                  * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3168                  * extent, and mark the inode with the error if it wasn't
3169                  * already set.  Any error during writeback would have already
3170                  * set the mapping error, so we need to set it if we're the ones
3171                  * marking this ordered extent as failed.
3172                  */
3173                 if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
3174                                              &ordered_extent->flags))
3175                         mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
3176
3177                 if (truncated)
3178                         start = ordered_extent->file_offset + logical_len;
3179                 else
3180                         start = ordered_extent->file_offset;
3181                 end = ordered_extent->file_offset + ordered_extent->len - 1;
3182                 clear_extent_uptodate(io_tree, start, end, NULL);
3183
3184                 /* Drop the cache for the part of the extent we didn't write. */
3185                 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
3186
3187                 /*
3188                  * If the ordered extent had an IOERR or something else went
3189                  * wrong we need to return the space for this ordered extent
3190                  * back to the allocator.  We only free the extent in the
3191                  * truncated case if we didn't write out the extent at all.
3192                  *
3193                  * If we made it past insert_reserved_file_extent before we
3194                  * errored out then we don't need to do this as the accounting
3195                  * has already been done.
3196                  */
3197                 if ((ret || !logical_len) &&
3198                     clear_reserved_extent &&
3199                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3200                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
3201                         btrfs_free_reserved_extent(fs_info,
3202                                                    ordered_extent->start,
3203                                                    ordered_extent->disk_len, 1);
3204         }
3205
3207         /*
3208          * This needs to be done to make sure anybody waiting knows we are done
3209          * updating everything for this ordered extent.
3210          */
3211         btrfs_remove_ordered_extent(inode, ordered_extent);
3212
3213         /* for snapshot-aware defrag */
3214         if (new) {
3215                 if (ret) {
3216                         free_sa_defrag_extent(new);
3217                         atomic_dec(&fs_info->defrag_running);
3218                 } else {
3219                         relink_file_extents(new);
3220                 }
3221         }
3222
3223         /* once for us */
3224         btrfs_put_ordered_extent(ordered_extent);
3225         /* once for the tree */
3226         btrfs_put_ordered_extent(ordered_extent);
3227
3228         return ret;
3229 }
3230
3231 static void finish_ordered_fn(struct btrfs_work *work)
3232 {
3233         struct btrfs_ordered_extent *ordered_extent;
3234         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3235         btrfs_finish_ordered_io(ordered_extent);
3236 }
3237
3238 static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3239                                 struct extent_state *state, int uptodate)
3240 {
3241         struct inode *inode = page->mapping->host;
3242         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3243         struct btrfs_ordered_extent *ordered_extent = NULL;
3244         struct btrfs_workqueue *wq;
3245         btrfs_work_func_t func;
3246
3247         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3248
3249         ClearPagePrivate2(page);
3250         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3251                                             end - start + 1, uptodate))
3252                 return;
3253
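             /*
              * Completions for the free space cache inode run on their own
              * workqueue so they can make progress independently of regular
              * write completions during a transaction commit.
              */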
3254         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
3255                 wq = fs_info->endio_freespace_worker;
3256                 func = btrfs_freespace_write_helper;
3257         } else {
3258                 wq = fs_info->endio_write_workers;
3259                 func = btrfs_endio_write_helper;
3260         }
3261
3262         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3263                         NULL);
3264         btrfs_queue_work(wq, &ordered_extent->work);
3265 }
3266
3267 static int __readpage_endio_check(struct inode *inode,
3268                                   struct btrfs_io_bio *io_bio,
3269                                   int icsum, struct page *page,
3270                                   int pgoff, u64 start, size_t len)
3271 {
3272         char *kaddr;
3273         u32 csum_expected;
3274         u32 csum = ~(u32)0;
3275
3276         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3277
3278         kaddr = kmap_atomic(page);
3279         csum = btrfs_csum_data(kaddr + pgoff, csum, len);
3280         btrfs_csum_final(csum, (u8 *)&csum);
3281         if (csum != csum_expected)
3282                 goto zeroit;
3283
3284         kunmap_atomic(kaddr);
3285         return 0;
3286 zeroit:
3287         btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
3288                                     io_bio->mirror_num);
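             /*
              * Overwrite the range that failed verification so bad data is
              * never exposed to the caller; the read may be retried from
              * another mirror by the endio code.
              */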
3289         memset(kaddr + pgoff, 1, len);
3290         flush_dcache_page(page);
3291         kunmap_atomic(kaddr);
3292         return -EIO;
3293 }
3294
3295 /*
3296  * when reads are done, we need to check csums to verify the data is correct.
3297  * if there's a match, we allow the bio to finish.  If not, the code in
3298  * extent_io.c will try to find good copies for us.
3299  */
3300 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3301                                       u64 phy_offset, struct page *page,
3302                                       u64 start, u64 end, int mirror)
3303 {
3304         size_t offset = start - page_offset(page);
3305         struct inode *inode = page->mapping->host;
3306         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3307         struct btrfs_root *root = BTRFS_I(inode)->root;
3308
3309         if (PageChecked(page)) {
3310                 ClearPageChecked(page);
3311                 return 0;
3312         }
3313
3314         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3315                 return 0;
3316
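             /*
              * Data relocation can write extents that carry no checksums;
              * such ranges are flagged EXTENT_NODATASUM in the relocation
              * inode's io tree, so clear the flag and skip verification.
              */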
3317         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3318             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3319                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3320                 return 0;
3321         }
3322
3323         phy_offset >>= inode->i_sb->s_blocksize_bits;
3324         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3325                                       start, (size_t)(end - start + 1));
3326 }
3327
3328 /*
3329  * btrfs_add_delayed_iput - perform a delayed iput on @inode
3330  *
3331  * @inode: The inode we want to perform iput on
3332  *
3333  * This function uses the generic vfs_inode::i_count to decide whether to
3334  * just decrement it (in case it's > 1) or, if this is the last iput, to link
3335  * the inode into the delayed iput machinery. Delayed iputs are processed at
3336  * transaction commit time, superblock commit and by the cleaner kthread.
3337  */
3338 void btrfs_add_delayed_iput(struct inode *inode)
3339 {
3340         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3341         struct btrfs_inode *binode = BTRFS_I(inode);
3342
3343         if (atomic_add_unless(&inode->i_count, -1, 1))
3344                 return;
3345
3346         spin_lock(&fs_info->delayed_iput_lock);
3347         ASSERT(list_empty(&binode->delayed_iput));
3348         list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3349         spin_unlock(&fs_info->delayed_iput_lock);
3350 }
3351
3352 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3353 {
3354
3355         spin_lock(&fs_info->delayed_iput_lock);
3356         while (!list_empty(&fs_info->delayed_iputs)) {
3357                 struct btrfs_inode *inode;
3358
3359                 inode = list_first_entry(&fs_info->delayed_iputs,
3360                                 struct btrfs_inode, delayed_iput);
3361                 list_del_init(&inode->delayed_iput);
3362                 spin_unlock(&fs_info->delayed_iput_lock);
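                     /*
                      * iput() may evict the inode and block, so it must not
                      * run under the spinlock; the lock is retaken after.
                      */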
3363                 iput(&inode->vfs_inode);
3364                 spin_lock(&fs_info->delayed_iput_lock);
3365         }
3366         spin_unlock(&fs_info->delayed_iput_lock);
3367 }
3368
3369 /*
3370  * This creates an orphan entry for the given inode in case something goes wrong
3371  * in the middle of an unlink.
3372  */
3373 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3374                      struct btrfs_inode *inode)
3375 {
3376         int ret;
3377
3378         ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3379         if (ret && ret != -EEXIST) {
3380                 btrfs_abort_transaction(trans, ret);
3381                 return ret;
3382         }
3383
3384         return 0;
3385 }
3386
3387 /*
3388  * We have done the delete so we can go ahead and remove the orphan item for
3389  * this particular inode.
3390  */
3391 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3392                             struct btrfs_inode *inode)
3393 {
3394         return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3395 }
3396
3397 /*
3398  * this cleans up any orphans that may be left on the list from the last use
3399  * of this root.
3400  */
3401 int btrfs_orphan_cleanup(struct btrfs_root *root)
3402 {
3403         struct btrfs_fs_info *fs_info = root->fs_info;
3404         struct btrfs_path *path;
3405         struct extent_buffer *leaf;
3406         struct btrfs_key key, found_key;
3407         struct btrfs_trans_handle *trans;
3408         struct inode *inode;
3409         u64 last_objectid = 0;
3410         int ret = 0, nr_unlink = 0;
3411
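             /* Atomically claim the cleanup so only one task runs it per root. */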
3412         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3413                 return 0;
3414
3415         path = btrfs_alloc_path();
3416         if (!path) {
3417                 ret = -ENOMEM;
3418                 goto out;
3419         }
3420         path->reada = READA_BACK;
3421
3422         key.objectid = BTRFS_ORPHAN_OBJECTID;
3423         key.type = BTRFS_ORPHAN_ITEM_KEY;
3424         key.offset = (u64)-1;
3425
3426         while (1) {
3427                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3428                 if (ret < 0)
3429                         goto out;
3430
3431                 /*
3432                  * ret == 0 means we found what we were searching for, which
3433                  * is weird, but possible; so only adjust the path if we did
3434                  * not find the key, then check whether the previous item matches
3435                  */
3436                 if (ret > 0) {
3437                         ret = 0;
3438                         if (path->slots[0] == 0)
3439                                 break;
3440                         path->slots[0]--;
3441                 }
3442
3443                 /* pull out the item */
3444                 leaf = path->nodes[0];
3445                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3446
3447                 /* make sure the item matches what we want */
3448                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3449                         break;
3450                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3451                         break;
3452
3453                 /* release the path since we're done with it */
3454                 btrfs_release_path(path);
3455
3456                 /*
3457                  * this is where we do essentially what btrfs_lookup does,
3458                  * minus the root crossing.  We store the inode number in the
3459                  * offset of the orphan item.
3460                  */
3461
3462                 if (found_key.offset == last_objectid) {
3463                         btrfs_err(fs_info,
3464                                   "Error removing orphan entry, stopping orphan cleanup");
3465                         ret = -EINVAL;
3466                         goto out;
3467                 }
3468
3469                 last_objectid = found_key.offset;
3470
3471                 found_key.objectid = found_key.offset;
3472                 found_key.type = BTRFS_INODE_ITEM_KEY;
3473                 found_key.offset = 0;
3474                 inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
3475                 ret = PTR_ERR_OR_ZERO(inode);
3476                 if (ret && ret != -ENOENT)
3477                         goto out;
3478
3479                 if (ret == -ENOENT && root == fs_info->tree_root) {
3480                         struct btrfs_root *dead_root;
3481                         struct btrfs_fs_info *fs_info = root->fs_info;
3482                         int is_dead_root = 0;
3483
3484                         /*
3485                          * this is an orphan in the tree root. Currently these
3486                          * can come from two sources:
3487                          *  a) a snapshot deletion in progress
3488                          *  b) a free space cache inode
3489                          * We need to distinguish those two, as the snapshot
3490                          * orphan must not get deleted.
3491                          * find_dead_roots already ran before us, so if this
3492                          * is a snapshot deletion, we should find the root
3493                          * in the dead_roots list
3494                          */
3495                         spin_lock(&fs_info->trans_lock);
3496                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3497                                             root_list) {
3498                                 if (dead_root->root_key.objectid ==
3499                                     found_key.objectid) {
3500                                         is_dead_root = 1;
3501                                         break;
3502                                 }
3503                         }
3504                         spin_unlock(&fs_info->trans_lock);
3505                         if (is_dead_root) {
3506                                 /* prevent this orphan from being found again */
3507                                 key.offset = found_key.objectid - 1;
3508                                 continue;
3509                         }
3510
3511                 }
3512
3513                 /*
3514                  * If we have an inode with links, there are a couple of
3515                  * possibilities. Old kernels (before v3.12) used to create an
3516                  * orphan item for truncate indicating that there were possibly
3517                  * extent items past i_size that needed to be deleted. In v3.12,
3518                  * truncate was changed to update i_size in sync with the extent
3519                  * items, but the (useless) orphan item was still created. Since
3520                  * v4.18, we don't create the orphan item for truncate at all.
3521                  *
3522                  * So, this item could mean that we need to do a truncate, but
3523                  * only if this filesystem was last used on a pre-v3.12 kernel
3524                  * and was not cleanly unmounted. The odds of that are quite
3525                  * slim, and it's a pain to do the truncate now, so just delete
3526                  * the orphan item.
3527                  *
3528                  * It's also possible that this orphan item was supposed to be
3529                  * deleted but wasn't. The inode number may have been reused,
3530                  * but either way, we can delete the orphan item.
3531                  */
3532                 if (ret == -ENOENT || inode->i_nlink) {
3533                         if (!ret)
3534                                 iput(inode);
3535                         trans = btrfs_start_transaction(root, 1);
3536                         if (IS_ERR(trans)) {
3537                                 ret = PTR_ERR(trans);
3538                                 goto out;
3539                         }
3540                         btrfs_debug(fs_info, "auto deleting %Lu",
3541                                     found_key.objectid);
3542                         ret = btrfs_del_orphan_item(trans, root,
3543                                                     found_key.objectid);
3544                         btrfs_end_transaction(trans);
3545                         if (ret)
3546                                 goto out;
3547                         continue;
3548                 }
3549
3550                 nr_unlink++;
3551
3552                 /* this will do delete_inode and everything for us */
3553                 iput(inode);
3554                 if (ret)
3555                         goto out;
3556         }
3557         /* release the path since we're done with it */
3558         btrfs_release_path(path);
3559
3560         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3561
3562         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3563                 trans = btrfs_join_transaction(root);
3564                 if (!IS_ERR(trans))
3565                         btrfs_end_transaction(trans);
3566         }
3567
3568         if (nr_unlink)
3569                 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3570
3571 out:
3572         if (ret)
3573                 btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3574         btrfs_free_path(path);
3575         return ret;
3576 }
3577
3578 /*
3579  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3580  * don't find any xattrs, we know there can't be any acls.
3581  *
3582  * slot is the slot the inode is in, objectid is the objectid of the inode
3583  */
3584 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3585                                           int slot, u64 objectid,
3586                                           int *first_xattr_slot)
3587 {
3588         u32 nritems = btrfs_header_nritems(leaf);
3589         struct btrfs_key found_key;
3590         static u64 xattr_access = 0;
3591         static u64 xattr_default = 0;
3592         int scanned = 0;
3593
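             /*
              * The xattr name hashes are computed once and cached; a
              * concurrent first call is harmless because btrfs_name_hash()
              * is deterministic.
              */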
3594         if (!xattr_access) {
3595                 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3596                                         strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3597                 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3598                                         strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3599         }
3600
3601         slot++;
3602         *first_xattr_slot = -1;
3603         while (slot < nritems) {
3604                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3605
3606                 /* we found a different objectid, there must not be acls */
3607                 if (found_key.objectid != objectid)
3608                         return 0;
3609
3610                 /* we found an xattr, assume we've got an acl */
3611                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3612                         if (*first_xattr_slot == -1)
3613                                 *first_xattr_slot = slot;
3614                         if (found_key.offset == xattr_access ||
3615                             found_key.offset == xattr_default)
3616                                 return 1;
3617                 }
3618
3619                 /*
3620                  * we found a key greater than an xattr key, there can't
3621                  * be any acls later on
3622                  */
3623                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3624                         return 0;
3625
3626                 slot++;
3627                 scanned++;
3628
3629                 /*
3630                  * it goes inode, inode backrefs, xattrs, extents,
3631                  * so if there are a ton of hard links to an inode there can
3632                  * be a lot of backrefs.  Don't waste time searching too hard,
3633                  * this is just an optimization
3634                  */
3635                 if (scanned >= 8)
3636                         break;
3637         }
3638         /* we hit the end of the leaf before we found an xattr or
3639          * something larger than an xattr.  We have to assume the inode
3640          * has acls
3641          */
3642         if (*first_xattr_slot == -1)
3643                 *first_xattr_slot = slot;
3644         return 1;
3645 }
3646
3647 /*
3648  * read an inode from the btree into the in-memory inode
3649  */
3650 static int btrfs_read_locked_inode(struct inode *inode,
3651                                    struct btrfs_path *in_path)
3652 {
3653         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3654         struct btrfs_path *path = in_path;
3655         struct extent_buffer *leaf;
3656         struct btrfs_inode_item *inode_item;
3657         struct btrfs_root *root = BTRFS_I(inode)->root;
3658         struct btrfs_key location;
3659         unsigned long ptr;
3660         int maybe_acls;
3661         u32 rdev;
3662         int ret;
3663         bool filled = false;
3664         int first_xattr_slot;
3665
3666         ret = btrfs_fill_inode(inode, &rdev);
3667         if (!ret)
3668                 filled = true;
3669
3670         if (!path) {
3671                 path = btrfs_alloc_path();
3672                 if (!path)
3673                         return -ENOMEM;
3674         }
3675
3676         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3677
3678         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3679         if (ret) {
3680                 if (path != in_path)
3681                         btrfs_free_path(path);
3682                 return ret;
3683         }
3684
3685         leaf = path->nodes[0];
3686
3687         if (filled)
3688                 goto cache_index;
3689
3690         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3691                                     struct btrfs_inode_item);
3692         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3693         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3694         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3695         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3696         btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3697
3698         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3699         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3700
3701         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3702         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3703
3704         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3705         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3706
3707         BTRFS_I(inode)->i_otime.tv_sec =
3708                 btrfs_timespec_sec(leaf, &inode_item->otime);
3709         BTRFS_I(inode)->i_otime.tv_nsec =
3710                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3711
3712         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3713         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3714         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3715
3716         inode_set_iversion_queried(inode,
3717                                    btrfs_inode_sequence(leaf, inode_item));
3718         inode->i_generation = BTRFS_I(inode)->generation;
3719         inode->i_rdev = 0;
3720         rdev = btrfs_inode_rdev(leaf, inode_item);
3721
3722         BTRFS_I(inode)->index_cnt = (u64)-1;
3723         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3724
3725 cache_index:
3726         /*
3727          * If we were modified in the current generation and evicted from memory
3728          * and then re-read we need to do a full sync since we don't have any
3729          * idea about which extents were modified before we were evicted from
3730          * cache.
3731          *
3732          * This is required for both inode re-read from disk and delayed inode
3733          * in delayed_nodes_tree.
3734          */
3735         if (BTRFS_I(inode)->last_trans == fs_info->generation)
3736                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3737                         &BTRFS_I(inode)->runtime_flags);
3738
3739         /*
3740          * We don't persist the id of the transaction where an unlink operation
3741          * against the inode was last made. So here we assume the inode might
3742          * have been evicted, and therefore the exact value of last_unlink_trans
3743          * lost, and set it to last_trans to avoid metadata inconsistencies
3744          * between the inode and its parent if the inode is fsync'ed and the log
3745          * replayed. For example, in the scenario:
3746          *
3747          * touch mydir/foo
3748          * ln mydir/foo mydir/bar
3749          * sync
3750          * unlink mydir/bar
3751          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3752          * xfs_io -c fsync mydir/foo
3753          * <power failure>
3754          * mount fs, triggers fsync log replay
3755          *
3756          * We must make sure that when we fsync our inode foo we also log its
3757          * parent inode, otherwise after log replay the parent still has the
3758          * dentry with the "bar" name but our inode foo has a link count of 1
3759          * and doesn't have an inode ref with the name "bar" anymore.
3760          *
3761          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3762          * but it guarantees correctness at the expense of occasional full
3763          * transaction commits on fsync if our inode is a directory, or if our
3764          * inode is not a directory, logging its parent unnecessarily.
3765          */
3766         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3767         /*
3768          * Similar reasoning for last_link_trans, needs to be set otherwise
3769          * for a case like the following:
3770          *
3771          * mkdir A
3772          * touch foo
3773          * ln foo A/bar
3774          * echo 2 > /proc/sys/vm/drop_caches
3775          * fsync foo
3776          * <power failure>
3777          *
3778          * Would result in link bar and directory A not existing after the power
3779          * failure.
3780          */
3781         BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
3782
3783         path->slots[0]++;
3784         if (inode->i_nlink != 1 ||
3785             path->slots[0] >= btrfs_header_nritems(leaf))
3786                 goto cache_acl;
3787
3788         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3789         if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3790                 goto cache_acl;
3791
3792         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3793         if (location.type == BTRFS_INODE_REF_KEY) {
3794                 struct btrfs_inode_ref *ref;
3795
3796                 ref = (struct btrfs_inode_ref *)ptr;
3797                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3798         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3799                 struct btrfs_inode_extref *extref;
3800
3801                 extref = (struct btrfs_inode_extref *)ptr;
3802                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3803                                                                      extref);
3804         }
3805 cache_acl:
3806         /*
3807          * try to precache a NULL acl entry for files that don't have
3808          * any xattrs or acls
3809          */
3810         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3811                         btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3812         if (first_xattr_slot != -1) {
3813                 path->slots[0] = first_xattr_slot;
3814                 ret = btrfs_load_inode_props(inode, path);
3815                 if (ret)
3816                         btrfs_err(fs_info,
3817                                   "error loading props for ino %llu (root %llu): %d",
3818                                   btrfs_ino(BTRFS_I(inode)),
3819                                   root->root_key.objectid, ret);
3820         }
3821         if (path != in_path)
3822                 btrfs_free_path(path);
3823
3824         if (!maybe_acls)
3825                 cache_no_acl(inode);
3826
3827         switch (inode->i_mode & S_IFMT) {
3828         case S_IFREG:
3829                 inode->i_mapping->a_ops = &btrfs_aops;
3830                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3831                 inode->i_fop = &btrfs_file_operations;
3832                 inode->i_op = &btrfs_file_inode_operations;
3833                 break;
3834         case S_IFDIR:
3835                 inode->i_fop = &btrfs_dir_file_operations;
3836                 inode->i_op = &btrfs_dir_inode_operations;
3837                 break;
3838         case S_IFLNK:
3839                 inode->i_op = &btrfs_symlink_inode_operations;
3840                 inode_nohighmem(inode);
3841                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3842                 break;
3843         default:
3844                 inode->i_op = &btrfs_special_inode_operations;
3845                 init_special_inode(inode, inode->i_mode, rdev);
3846                 break;
3847         }
3848
3849         btrfs_sync_inode_flags_to_i_flags(inode);
3850         return 0;
3851 }
3852
3853 /*
3854  * given a leaf and an inode, copy the inode fields into the leaf
3855  */
3856 static void fill_inode_item(struct btrfs_trans_handle *trans,
3857                             struct extent_buffer *leaf,
3858                             struct btrfs_inode_item *item,
3859                             struct inode *inode)
3860 {
3861         struct btrfs_map_token token;
3862
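             /*
              * The token caches the mapped address inside the extent buffer
              * so the run of set_token helpers below does not re-map the
              * page for every field.
              */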
3863         btrfs_init_map_token(&token);
3864
3865         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3866         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3867         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3868                                    &token);
3869         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3870         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3871
3872         btrfs_set_token_timespec_sec(leaf, &item->atime,
3873                                      inode->i_atime.tv_sec, &token);
3874         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3875                                       inode->i_atime.tv_nsec, &token);
3876
3877         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3878                                      inode->i_mtime.tv_sec, &token);
3879         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3880                                       inode->i_mtime.tv_nsec, &token);
3881
3882         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3883                                      inode->i_ctime.tv_sec, &token);
3884         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3885                                       inode->i_ctime.tv_nsec, &token);
3886
3887         btrfs_set_token_timespec_sec(leaf, &item->otime,
3888                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3889         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3890                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3891
3892         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3893                                      &token);
3894         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3895                                          &token);
3896         btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode),
3897                                        &token);
3898         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3899         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3900         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3901         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3902 }
3903
3904 /*
3905  * copy everything in the in-memory inode into the btree.
3906  */
3907 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3908                                 struct btrfs_root *root, struct inode *inode)
3909 {
3910         struct btrfs_inode_item *inode_item;
3911         struct btrfs_path *path;
3912         struct extent_buffer *leaf;
3913         int ret;
3914
3915         path = btrfs_alloc_path();
3916         if (!path)
3917                 return -ENOMEM;
3918
3919         path->leave_spinning = 1;
3920         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3921                                  1);
3922         if (ret) {
3923                 if (ret > 0)
3924                         ret = -ENOENT;
3925                 goto failed;
3926         }
3927
3928         leaf = path->nodes[0];
3929         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3930                                     struct btrfs_inode_item);
3931
3932         fill_inode_item(trans, leaf, inode_item, inode);
3933         btrfs_mark_buffer_dirty(leaf);
3934         btrfs_set_inode_last_trans(trans, inode);
3935         ret = 0;
3936 failed:
3937         btrfs_free_path(path);
3938         return ret;
3939 }
3940
3941 /*
3942  * copy everything in the in-memory inode into the btree; prefer the delayed-items path when safe.
3943  */
3944 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3945                                 struct btrfs_root *root, struct inode *inode)
3946 {
3947         struct btrfs_fs_info *fs_info = root->fs_info;
3948         int ret;
3949
3950         /*
3951          * If the inode is a free space inode, we can deadlock during commit
3952          * if we put it into the delayed code.
3953          *
3954          * The data relocation inode should also be directly updated
3955  * without delay.
3956          */
3957         if (!btrfs_is_free_space_inode(BTRFS_I(inode))
3958             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3959             && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
3960                 btrfs_update_root_times(trans, root);
3961
3962                 ret = btrfs_delayed_update_inode(trans, root, inode);
3963                 if (!ret)
3964                         btrfs_set_inode_last_trans(trans, inode);
3965                 return ret;
3966         }
3967
3968         return btrfs_update_inode_item(trans, root, inode);
3969 }
3970
3971 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3972                                          struct btrfs_root *root,
3973                                          struct inode *inode)
3974 {
3975         int ret;
3976
3977         ret = btrfs_update_inode(trans, root, inode);
3978         if (ret == -ENOSPC)
3979                 return btrfs_update_inode_item(trans, root, inode);
3980         return ret;
3981 }
3982
3983 /*
3984  * unlink helper that gets used here in inode.c and in the tree logging
3985  * recovery code.  It removes a link in a directory with a given name, and
3986  * also drops the back refs in the inode to the directory.
3987  */
3988 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3989                                 struct btrfs_root *root,
3990                                 struct btrfs_inode *dir,
3991                                 struct btrfs_inode *inode,
3992                                 const char *name, int name_len)
3993 {
3994         struct btrfs_fs_info *fs_info = root->fs_info;
3995         struct btrfs_path *path;
3996         int ret = 0;
3997         struct extent_buffer *leaf;
3998         struct btrfs_dir_item *di;
3999         struct btrfs_key key;
4000         u64 index;
4001         u64 ino = btrfs_ino(inode);
4002         u64 dir_ino = btrfs_ino(dir);
4003
4004         path = btrfs_alloc_path();
4005         if (!path) {
4006                 ret = -ENOMEM;
4007                 goto out;
4008         }
4009
4010         path->leave_spinning = 1;
4011         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4012                                     name, name_len, -1);
4013         if (IS_ERR(di)) {
4014                 ret = PTR_ERR(di);
4015                 goto err;
4016         }
4017         if (!di) {
4018                 ret = -ENOENT;
4019                 goto err;
4020         }
4021         leaf = path->nodes[0];
4022         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4023         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4024         if (ret)
4025                 goto err;
4026         btrfs_release_path(path);
4027
4028         /*
4029  * If we don't have the dir index cached, we have to find it by looking
4030  * up the inode ref; and since that lookup already gives us the inode
4031  * ref, we remove it directly instead of via a delayed deletion.
4032  *
4033  * But if we do have the dir index, there is no need to search for the
4034  * inode ref.  Since the inode ref sits close to the inode item, it is
4035  * better to delay its deletion and fold it into the later update of
4036  * the inode item.
4037          */
4038         if (inode->dir_index) {
4039                 ret = btrfs_delayed_delete_inode_ref(inode);
4040                 if (!ret) {
4041                         index = inode->dir_index;
4042                         goto skip_backref;
4043                 }
4044         }
4045
4046         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
4047                                   dir_ino, &index);
4048         if (ret) {
4049                 btrfs_info(fs_info,
4050                         "failed to delete reference to %.*s, inode %llu parent %llu",
4051                         name_len, name, ino, dir_ino);
4052                 btrfs_abort_transaction(trans, ret);
4053                 goto err;
4054         }
4055 skip_backref:
4056         ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4057         if (ret) {
4058                 btrfs_abort_transaction(trans, ret);
4059                 goto err;
4060         }
4061
4062         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
4063                         dir_ino);
4064         if (ret != 0 && ret != -ENOENT) {
4065                 btrfs_abort_transaction(trans, ret);
4066                 goto err;
4067         }
4068
4069         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
4070                         index);
4071         if (ret == -ENOENT)
4072                 ret = 0;
4073         else if (ret)
4074                 btrfs_abort_transaction(trans, ret);
4075 err:
4076         btrfs_free_path(path);
4077         if (ret)
4078                 goto out;
4079
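        /*
         * A directory's i_size is the sum of the name lengths of its
         * entries, counted twice: once for the DIR_ITEM and once for the
         * DIR_INDEX item.  Hence the name_len * 2 below.
         */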
4080         btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
4081         inode_inc_iversion(&inode->vfs_inode);
4082         inode_inc_iversion(&dir->vfs_inode);
4083         inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
4084                 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
4085         ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
4086 out:
4087         return ret;
4088 }
4089
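/*
 * Unlink an inode: remove the directory entry with __btrfs_unlink_inode(),
 * then drop the victim's link count and write the inode item back.
 */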
4090 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4091                        struct btrfs_root *root,
4092                        struct btrfs_inode *dir, struct btrfs_inode *inode,
4093                        const char *name, int name_len)
4094 {
4095         int ret;
4096         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4097         if (!ret) {
4098                 drop_nlink(&inode->vfs_inode);
4099                 ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
4100         }
4101         return ret;
4102 }
4103
4104 /*
4105  * helper to start transaction for unlink and rmdir.
4106  *
4107  * unlink and rmdir are special in btrfs: they do not always free space,
4108  * so if we cannot make our reservations the normal way, see whether there
4109  * is enough slack room in the global reserve to migrate; otherwise we
4110  * cannot allow the unlink to occur.
4111  */
4112 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4113 {
4114         struct btrfs_root *root = BTRFS_I(dir)->root;
4115
4116         /*
4117          * 1 for the possible orphan item
4118          * 1 for the dir item
4119          * 1 for the dir index
4120          * 1 for the inode ref
4121          * 1 for the inode
4122          */
4123         return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4124 }
4125
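/*
 * Remove a single directory entry and, if the victim's link count drops to
 * zero, add it to the orphan list so its space is reclaimed once the last
 * reference goes away.
 */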
4126 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4127 {
4128         struct btrfs_root *root = BTRFS_I(dir)->root;
4129         struct btrfs_trans_handle *trans;
4130         struct inode *inode = d_inode(dentry);
4131         int ret;
4132
4133         trans = __unlink_start_trans(dir);
4134         if (IS_ERR(trans))
4135                 return PTR_ERR(trans);
4136
4137         btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4138                         0);
4139
4140         ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4141                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4142                         dentry->d_name.len);
4143         if (ret)
4144                 goto out;
4145
4146         if (inode->i_nlink == 0) {
4147                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4148                 if (ret)
4149                         goto out;
4150         }
4151
4152 out:
4153         btrfs_end_transaction(trans);
4154         btrfs_btree_balance_dirty(root->fs_info);
4155         return ret;
4156 }
4157
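/*
 * Remove the directory entry that points at a subvolume root (or at the
 * empty placeholder directory used for snapshots of unreferenced
 * subvolumes), deleting the matching root ref or dir index item along with
 * the delayed dir index.
 */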
4158 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4159                                struct inode *dir, struct dentry *dentry)
4160 {
4161         struct btrfs_root *root = BTRFS_I(dir)->root;
4162         struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4163         struct btrfs_path *path;
4164         struct extent_buffer *leaf;
4165         struct btrfs_dir_item *di;
4166         struct btrfs_key key;
4167         const char *name = dentry->d_name.name;
4168         int name_len = dentry->d_name.len;
4169         u64 index;
4170         int ret;
4171         u64 objectid;
4172         u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4173
4174         if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4175                 objectid = inode->root->root_key.objectid;
4176         } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4177                 objectid = inode->location.objectid;
4178         } else {
4179                 WARN_ON(1);
4180                 return -EINVAL;
4181         }
4182
4183         path = btrfs_alloc_path();
4184         if (!path)
4185                 return -ENOMEM;
4186
4187         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4188                                    name, name_len, -1);
4189         if (IS_ERR_OR_NULL(di)) {
4190                 if (!di)
4191                         ret = -ENOENT;
4192                 else
4193                         ret = PTR_ERR(di);
4194                 goto out;
4195         }
4196
4197         leaf = path->nodes[0];
4198         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4199         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4200         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4201         if (ret) {
4202                 btrfs_abort_transaction(trans, ret);
4203                 goto out;
4204         }
4205         btrfs_release_path(path);
4206
4207         /*
4208          * This is a placeholder inode for a subvolume we didn't have a
4209          * reference to at the time of the snapshot creation.  In the meantime
4210          * we could have renamed the real subvol link into our snapshot, so
4211  * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4212  * Instead simply look up the dir_index_item for this entry so we can
4213          * remove it.  Otherwise we know we have a ref to the root and we can
4214          * call btrfs_del_root_ref, and it _shouldn't_ fail.
4215          */
4216         if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4217                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4218                                                  name, name_len);
4219                 if (IS_ERR_OR_NULL(di)) {
4220                         if (!di)
4221                                 ret = -ENOENT;
4222                         else
4223                                 ret = PTR_ERR(di);
4224                         btrfs_abort_transaction(trans, ret);
4225                         goto out;
4226                 }
4227
4228                 leaf = path->nodes[0];
4229                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4230                 index = key.offset;
4231                 btrfs_release_path(path);
4232         } else {
4233                 ret = btrfs_del_root_ref(trans, objectid,
4234                                          root->root_key.objectid, dir_ino,
4235                                          &index, name, name_len);
4236                 if (ret) {
4237                         btrfs_abort_transaction(trans, ret);
4238                         goto out;
4239                 }
4240         }
4241
4242         ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
4243         if (ret) {
4244                 btrfs_abort_transaction(trans, ret);
4245                 goto out;
4246         }
4247
4248         btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4249         inode_inc_iversion(dir);
4250         dir->i_mtime = dir->i_ctime = current_time(dir);
4251         ret = btrfs_update_inode_fallback(trans, root, dir);
4252         if (ret)
4253                 btrfs_abort_transaction(trans, ret);
4254 out:
4255         btrfs_free_path(path);
4256         return ret;
4257 }
4258
4259 /*
4260  * Helper to check if the subvolume references other subvolumes or if it's
4261  * the default subvolume.
4262  */
4263 static noinline int may_destroy_subvol(struct btrfs_root *root)
4264 {
4265         struct btrfs_fs_info *fs_info = root->fs_info;
4266         struct btrfs_path *path;
4267         struct btrfs_dir_item *di;
4268         struct btrfs_key key;
4269         u64 dir_id;
4270         int ret;
4271
4272         path = btrfs_alloc_path();
4273         if (!path)
4274                 return -ENOMEM;
4275
4276         /* Make sure this root isn't set as the default subvol */
4277         dir_id = btrfs_super_root_dir(fs_info->super_copy);
4278         di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4279                                    dir_id, "default", 7, 0);
4280         if (di && !IS_ERR(di)) {
4281                 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4282                 if (key.objectid == root->root_key.objectid) {
4283                         ret = -EPERM;
4284                         btrfs_err(fs_info,
4285                                   "deleting default subvolume %llu is not allowed",
4286                                   key.objectid);
4287                         goto out;
4288                 }
4289                 btrfs_release_path(path);
4290         }
4291
4292         key.objectid = root->root_key.objectid;
4293         key.type = BTRFS_ROOT_REF_KEY;
4294         key.offset = (u64)-1;
4295
4296         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4297         if (ret < 0)
4298                 goto out;
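        /*
         * A ROOT_REF key can never exist at offset (u64)-1, so an exact
         * match here would mean the root tree is corrupted.
         */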
4299         BUG_ON(ret == 0);
4300
4301         ret = 0;
4302         if (path->slots[0] > 0) {
4303                 path->slots[0]--;
4304                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4305                 if (key.objectid == root->root_key.objectid &&
4306                     key.type == BTRFS_ROOT_REF_KEY)
4307                         ret = -ENOTEMPTY;
4308         }
4309 out:
4310         btrfs_free_path(path);
4311         return ret;
4312 }
4313
4314 /* Delete all dentries for inodes belonging to the root */
4315 static void btrfs_prune_dentries(struct btrfs_root *root)
4316 {
4317         struct btrfs_fs_info *fs_info = root->fs_info;
4318         struct rb_node *node;
4319         struct rb_node *prev;
4320         struct btrfs_inode *entry;
4321         struct inode *inode;
4322         u64 objectid = 0;
4323
4324         if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
4325                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4326
4327         spin_lock(&root->inode_lock);
4328 again:
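        /*
         * Walk the rb-tree of in-memory inodes to find the leftmost inode
         * with an inode number >= objectid; the search restarts here every
         * time inode_lock is dropped.
         */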
4329         node = root->inode_tree.rb_node;
4330         prev = NULL;
4331         while (node) {
4332                 prev = node;
4333                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4334
4335                 if (objectid < btrfs_ino(entry))
4336                         node = node->rb_left;
4337                 else if (objectid > btrfs_ino(entry))
4338                         node = node->rb_right;
4339                 else
4340                         break;
4341         }
4342         if (!node) {
4343                 while (prev) {
4344                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
4345                         if (objectid <= btrfs_ino(entry)) {
4346                                 node = prev;
4347                                 break;
4348                         }
4349                         prev = rb_next(prev);
4350                 }
4351         }
4352         while (node) {
4353                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4354                 objectid = btrfs_ino(entry) + 1;
4355                 inode = igrab(&entry->vfs_inode);
4356                 if (inode) {
4357                         spin_unlock(&root->inode_lock);
4358                         if (atomic_read(&inode->i_count) > 1)
4359                                 d_prune_aliases(inode);
4360                         /*
4361                          * btrfs_drop_inode will have it removed from the inode
4362                          * cache when its usage count hits zero.
4363                          */
4364                         iput(inode);
4365                         cond_resched();
4366                         spin_lock(&root->inode_lock);
4367                         goto again;
4368                 }
4369
4370                 if (cond_resched_lock(&root->inode_lock))
4371                         goto again;
4372
4373                 node = rb_next(node);
4374         }
4375         spin_unlock(&root->inode_lock);
4376 }
4377
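/*
 * Delete a subvolume: mark the root dead to fence off send, remove the
 * directory entry, zero the root's refcount and insert an orphan item so
 * the cleaner thread can drop the actual tree later.
 */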
4378 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
4379 {
4380         struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
4381         struct btrfs_root *root = BTRFS_I(dir)->root;
4382         struct inode *inode = d_inode(dentry);
4383         struct btrfs_root *dest = BTRFS_I(inode)->root;
4384         struct btrfs_trans_handle *trans;
4385         struct btrfs_block_rsv block_rsv;
4386         u64 root_flags;
4387         int ret;
4388         int err;
4389
4390         /*
4391  * Don't allow deleting a subvolume while a send is in progress. This is
4392          * inside the inode lock so the error handling that has to drop the bit
4393          * again is not run concurrently.
4394          */
4395         spin_lock(&dest->root_item_lock);
4396         root_flags = btrfs_root_flags(&dest->root_item);
4397         if (dest->send_in_progress == 0) {
4398                 btrfs_set_root_flags(&dest->root_item,
4399                                 root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4400                 spin_unlock(&dest->root_item_lock);
4401         } else {
4402                 spin_unlock(&dest->root_item_lock);
4403                 btrfs_warn(fs_info,
4404                            "attempt to delete subvolume %llu during send",
4405                            dest->root_key.objectid);
4406                 return -EPERM;
4407         }
4408
4409         down_write(&fs_info->subvol_sem);
4410
4411         err = may_destroy_subvol(dest);
4412         if (err)
4413                 goto out_up_write;
4414
4415         btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4416         /*
4417          * One for dir inode,
4418          * two for dir entries,
4419          * two for root ref/backref.
4420          */
4421         err = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4422         if (err)
4423                 goto out_up_write;
4424
4425         trans = btrfs_start_transaction(root, 0);
4426         if (IS_ERR(trans)) {
4427                 err = PTR_ERR(trans);
4428                 goto out_release;
4429         }
4430         trans->block_rsv = &block_rsv;
4431         trans->bytes_reserved = block_rsv.size;
4432
4433         btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
4434
4435         ret = btrfs_unlink_subvol(trans, dir, dentry);
4436         if (ret) {
4437                 err = ret;
4438                 btrfs_abort_transaction(trans, ret);
4439                 goto out_end_trans;
4440         }
4441
4442         btrfs_record_root_in_trans(trans, dest);
4443
4444         memset(&dest->root_item.drop_progress, 0,
4445                 sizeof(dest->root_item.drop_progress));
4446         dest->root_item.drop_level = 0;
4447         btrfs_set_root_refs(&dest->root_item, 0);
4448
4449         if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4450                 ret = btrfs_insert_orphan_item(trans,
4451                                         fs_info->tree_root,
4452                                         dest->root_key.objectid);
4453                 if (ret) {
4454                         btrfs_abort_transaction(trans, ret);
4455                         err = ret;
4456                         goto out_end_trans;
4457                 }
4458         }
4459
4460         ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4461                                   BTRFS_UUID_KEY_SUBVOL,
4462                                   dest->root_key.objectid);
4463         if (ret && ret != -ENOENT) {
4464                 btrfs_abort_transaction(trans, ret);
4465                 err = ret;
4466                 goto out_end_trans;
4467         }
4468         if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4469                 ret = btrfs_uuid_tree_remove(trans,
4470                                           dest->root_item.received_uuid,
4471                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4472                                           dest->root_key.objectid);
4473                 if (ret && ret != -ENOENT) {
4474                         btrfs_abort_transaction(trans, ret);
4475                         err = ret;
4476                         goto out_end_trans;
4477                 }
4478         }
4479
4480         free_anon_bdev(dest->anon_dev);
4481         dest->anon_dev = 0;
4482 out_end_trans:
4483         trans->block_rsv = NULL;
4484         trans->bytes_reserved = 0;
4485         ret = btrfs_end_transaction(trans);
4486         if (ret && !err)
4487                 err = ret;
4488         inode->i_flags |= S_DEAD;
4489 out_release:
4490         btrfs_subvolume_release_metadata(fs_info, &block_rsv);
4491 out_up_write:
4492         up_write(&fs_info->subvol_sem);
4493         if (err) {
4494                 spin_lock(&dest->root_item_lock);
4495                 root_flags = btrfs_root_flags(&dest->root_item);
4496                 btrfs_set_root_flags(&dest->root_item,
4497                                 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4498                 spin_unlock(&dest->root_item_lock);
4499         } else {
4500                 d_invalidate(dentry);
4501                 btrfs_prune_dentries(dest);
4502                 ASSERT(dest->send_in_progress == 0);
4503
4504                 /* the last ref */
4505                 if (dest->ino_cache_inode) {
4506                         iput(dest->ino_cache_inode);
4507                         dest->ino_cache_inode = NULL;
4508                 }
4509         }
4510
4511         return err;
4512 }
4513
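/*
 * rmdir: empty directories are unlinked like files (plus an orphan item),
 * while subvolume roots and snapshot placeholders take the
 * btrfs_unlink_subvol()/btrfs_delete_subvolume() paths.
 */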
4514 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4515 {
4516         struct inode *inode = d_inode(dentry);
4517         int err = 0;
4518         struct btrfs_root *root = BTRFS_I(dir)->root;
4519         struct btrfs_trans_handle *trans;
4520         u64 last_unlink_trans;
4521
4522         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4523                 return -ENOTEMPTY;
4524         if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
4525                 return btrfs_delete_subvolume(dir, dentry);
4526
4527         trans = __unlink_start_trans(dir);
4528         if (IS_ERR(trans))
4529                 return PTR_ERR(trans);
4530
4531         if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4532                 err = btrfs_unlink_subvol(trans, dir, dentry);
4533                 goto out;
4534         }
4535
4536         err = btrfs_orphan_add(trans, BTRFS_I(inode));
4537         if (err)
4538                 goto out;
4539
4540         last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4541
4542         /* now the directory is empty */
4543         err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4544                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4545                         dentry->d_name.len);
4546         if (!err) {
4547                 btrfs_i_size_write(BTRFS_I(inode), 0);
4548                 /*
4549                  * Propagate the last_unlink_trans value of the deleted dir to
4550                  * its parent directory. This is to prevent an unrecoverable
4551                  * log tree in the case we do something like this:
4552                  * 1) create dir foo
4553                  * 2) create snapshot under dir foo
4554                  * 3) delete the snapshot
4555                  * 4) rmdir foo
4556                  * 5) mkdir foo
4557                  * 6) fsync foo or some file inside foo
4558                  */
4559                 if (last_unlink_trans >= trans->transid)
4560                         BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4561         }
4562 out:
4563         btrfs_end_transaction(trans);
4564         btrfs_btree_balance_dirty(root->fs_info);
4565
4566         return err;
4567 }
4568
4569 static int truncate_space_check(struct btrfs_trans_handle *trans,
4570                                 struct btrfs_root *root,
4571                                 u64 bytes_deleted)
4572 {
4573         struct btrfs_fs_info *fs_info = root->fs_info;
4574         int ret;
4575
4576         /*
4577  * This is only used to apply pressure to the enospc system; we don't
4578  * intend to use this reservation at all.
4579          */
4580         bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
4581         bytes_deleted *= fs_info->nodesize;
4582         ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
4583                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4584         if (!ret) {
4585                 trace_btrfs_space_reservation(fs_info, "transaction",
4586                                               trans->transid,
4587                                               bytes_deleted, 1);
4588                 trans->bytes_reserved += bytes_deleted;
4589         }
4590         return ret;
4591
4592 }
4593
4594 /*
4595  * Return this if we need to call truncate_block for the last bit of the
4596  * truncate.
4597  */
4598 #define NEED_TRUNCATE_BLOCK 1
4599
4600 /*
4601  * this can truncate away extent items, csum items and directory items.
4602  * It starts at a high offset and removes keys until it can't find
4603  * any higher than new_size
4604  *
4605  * csum items that cross the new i_size are truncated to the new size
4606  * as well.
4607  *
4608  * min_type is the minimum key type to truncate down to.  If set to 0, this
4609  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4610  */
4611 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4612                                struct btrfs_root *root,
4613                                struct inode *inode,
4614                                u64 new_size, u32 min_type)
4615 {
4616         struct btrfs_fs_info *fs_info = root->fs_info;
4617         struct btrfs_path *path;
4618         struct extent_buffer *leaf;
4619         struct btrfs_file_extent_item *fi;
4620         struct btrfs_key key;
4621         struct btrfs_key found_key;
4622         u64 extent_start = 0;
4623         u64 extent_num_bytes = 0;
4624         u64 extent_offset = 0;
4625         u64 item_end = 0;
4626         u64 last_size = new_size;
4627         u32 found_type = (u8)-1;
4628         int found_extent;
4629         int del_item;
4630         int pending_del_nr = 0;
4631         int pending_del_slot = 0;
4632         int extent_type = -1;
4633         int ret;
4634         u64 ino = btrfs_ino(BTRFS_I(inode));
4635         u64 bytes_deleted = 0;
4636         bool be_nice = false;
4637         bool should_throttle = false;
4638         bool should_end = false;
4639
4640         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4641
4642         /*
4643  * for non-free-space inodes and roots with BTRFS_ROOT_REF_COWS set, we
4644  * want to back off from time to time
4645          */
4646         if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
4647             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4648                 be_nice = true;
4649
4650         path = btrfs_alloc_path();
4651         if (!path)
4652                 return -ENOMEM;
4653         path->reada = READA_BACK;
4654
4655         /*
4656          * We want to drop from the next block forward in case this new size is
4657          * not block aligned since we will be keeping the last block of the
4658          * extent just the way it is.
4659          */
4660         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4661             root == fs_info->tree_root)
4662                 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
4663                                         fs_info->sectorsize),
4664                                         (u64)-1, 0);
4665
4666         /*
4667          * This function is also used to drop the items in the log tree before
4668  * we relog the inode, so if root != BTRFS_I(inode)->root, it means we
4669  * are dropping the logged items, and so we shouldn't kill the delayed
4670  * items.
4671          */
4672         if (min_type == 0 && root == BTRFS_I(inode)->root)
4673                 btrfs_kill_delayed_inode_items(BTRFS_I(inode));
4674
4675         key.objectid = ino;
4676         key.offset = (u64)-1;
4677         key.type = (u8)-1;
4678
4679 search_again:
4680         /*
4681          * with a 16K leaf size and 128MB extents, you can actually queue
4682  * up a huge file in a single leaf.  Most of the time when bytes_deleted
4683  * is > 0, it will be huge by the time we get here
4684          */
4685         if (be_nice && bytes_deleted > SZ_32M &&
4686             btrfs_should_end_transaction(trans)) {
4687                 ret = -EAGAIN;
4688                 goto out;
4689         }
4690
4691         path->leave_spinning = 1;
4692         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4693         if (ret < 0)
4694                 goto out;
4695
4696         if (ret > 0) {
4697                 ret = 0;
4698                 /* there are no items in the tree for us to truncate, we're
4699                  * done
4700                  */
4701                 if (path->slots[0] == 0)
4702                         goto out;
4703                 path->slots[0]--;
4704         }
4705
4706         while (1) {
4707                 fi = NULL;
4708                 leaf = path->nodes[0];
4709                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4710                 found_type = found_key.type;
4711
4712                 if (found_key.objectid != ino)
4713                         break;
4714
4715                 if (found_type < min_type)
4716                         break;
4717
4718                 item_end = found_key.offset;
4719                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4720                         fi = btrfs_item_ptr(leaf, path->slots[0],
4721                                             struct btrfs_file_extent_item);
4722                         extent_type = btrfs_file_extent_type(leaf, fi);
4723                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4724                                 item_end +=
4725                                     btrfs_file_extent_num_bytes(leaf, fi);
4726
4727                                 trace_btrfs_truncate_show_fi_regular(
4728                                         BTRFS_I(inode), leaf, fi,
4729                                         found_key.offset);
4730                         } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4731                                 item_end += btrfs_file_extent_ram_bytes(leaf,
4732                                                                         fi);
4733
4734                                 trace_btrfs_truncate_show_fi_inline(
4735                                         BTRFS_I(inode), leaf, fi, path->slots[0],
4736                                         found_key.offset);
4737                         }
4738                         item_end--;
4739                 }
4740                 if (found_type > min_type) {
4741                         del_item = 1;
4742                 } else {
4743                         if (item_end < new_size)
4744                                 break;
4745                         if (found_key.offset >= new_size)
4746                                 del_item = 1;
4747                         else
4748                                 del_item = 0;
4749                 }
4750                 found_extent = 0;
4751                 /* FIXME, shrink the extent if the ref count is only 1 */
4752                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4753                         goto delete;
4754
4755                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4756                         u64 num_dec;
4757                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4758                         if (!del_item) {
4759                                 u64 orig_num_bytes =
4760                                         btrfs_file_extent_num_bytes(leaf, fi);
4761                                 extent_num_bytes = ALIGN(new_size -
4762                                                 found_key.offset,
4763                                                 fs_info->sectorsize);
4764                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4765                                                          extent_num_bytes);
4766                                 num_dec = (orig_num_bytes -
4767                                            extent_num_bytes);
4768                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4769                                              &root->state) &&
4770                                     extent_start != 0)
4771                                         inode_sub_bytes(inode, num_dec);
4772                                 btrfs_mark_buffer_dirty(leaf);
4773                         } else {
4774                                 extent_num_bytes =
4775                                         btrfs_file_extent_disk_num_bytes(leaf,
4776                                                                          fi);
4777                                 extent_offset = found_key.offset -
4778                                         btrfs_file_extent_offset(leaf, fi);
4779
4780                                 /* FIXME blocksize != 4096 */
4781                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4782                                 if (extent_start != 0) {
4783                                         found_extent = 1;
4784                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4785                                                      &root->state))
4786                                                 inode_sub_bytes(inode, num_dec);
4787                                 }
4788                         }
4789                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4790                         /*
4791                          * we can't truncate inline items that have had
4792                          * special encodings
4793                          */
4794                         if (!del_item &&
4795                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4796                             btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
4797                             btrfs_file_extent_compression(leaf, fi) == 0) {
4798                                 u32 size = (u32)(new_size - found_key.offset);
4799
4800                                 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4801                                 size = btrfs_file_extent_calc_inline_size(size);
4802                                 btrfs_truncate_item(root->fs_info, path, size, 1);
4803                         } else if (!del_item) {
4804                                 /*
4805                                  * We have to bail so the last_size is set to
4806                                  * just before this extent.
4807                                  */
4808                                 ret = NEED_TRUNCATE_BLOCK;
4809                                 break;
4810                         }
4811
4812                         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4813                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4814                 }
4815 delete:
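                /*
                 * Track the lowest offset we delete for the final i_size
                 * update, and batch runs of adjacent leaf slots so they can
                 * be removed with a single btrfs_del_items() call.
                 */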
4816                 if (del_item)
4817                         last_size = found_key.offset;
4818                 else
4819                         last_size = new_size;
4820                 if (del_item) {
4821                         if (!pending_del_nr) {
4822                                 /* no pending yet, add ourselves */
4823                                 pending_del_slot = path->slots[0];
4824                                 pending_del_nr = 1;
4825                         } else if (pending_del_nr &&
4826                                    path->slots[0] + 1 == pending_del_slot) {
4827                                 /* hop on the pending chunk */
4828                                 pending_del_nr++;
4829                                 pending_del_slot = path->slots[0];
4830                         } else {
4831                                 BUG();
4832                         }
4833                 } else {
4834                         break;
4835                 }
4836                 should_throttle = false;
4837
4838                 if (found_extent &&
4839                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4840                      root == fs_info->tree_root)) {
4841                         btrfs_set_path_blocking(path);
4842                         bytes_deleted += extent_num_bytes;
4843                         ret = btrfs_free_extent(trans, root, extent_start,
4844                                                 extent_num_bytes, 0,
4845                                                 btrfs_header_owner(leaf),
4846                                                 ino, extent_offset);
4847                         if (ret) {
4848                                 btrfs_abort_transaction(trans, ret);
4849                                 break;
4850                         }
4851                         if (btrfs_should_throttle_delayed_refs(trans, fs_info))
4852                                 btrfs_async_run_delayed_refs(fs_info,
4853                                         trans->delayed_ref_updates * 2,
4854                                         trans->transid, 0);
4855                         if (be_nice) {
4856                                 if (truncate_space_check(trans, root,
4857                                                          extent_num_bytes)) {
4858                                         should_end = true;
4859                                 }
4860                                 if (btrfs_should_throttle_delayed_refs(trans,
4861                                                                        fs_info))
4862                                         should_throttle = true;
4863                         }
4864                 }
4865
4866                 if (found_type == BTRFS_INODE_ITEM_KEY)
4867                         break;
4868
4869                 if (path->slots[0] == 0 ||
4870                     path->slots[0] != pending_del_slot ||
4871                     should_throttle || should_end) {
4872                         if (pending_del_nr) {
4873                                 ret = btrfs_del_items(trans, root, path,
4874                                                 pending_del_slot,
4875                                                 pending_del_nr);
4876                                 if (ret) {
4877                                         btrfs_abort_transaction(trans, ret);
4878                                         break;
4879                                 }
4880                                 pending_del_nr = 0;
4881                         }
4882                         btrfs_release_path(path);
4883                         if (should_throttle) {
4884                                 unsigned long updates = trans->delayed_ref_updates;
4885                                 if (updates) {
4886                                         trans->delayed_ref_updates = 0;
4887                                         ret = btrfs_run_delayed_refs(trans,
4888                                                                    updates * 2);
4889                                         if (ret)
4890                                                 break;
4891                                 }
4892                         }
4893                         /*
4894                          * if we failed to refill our space rsv, bail out
4895                          * and let the transaction restart
4896                          */
4897                         if (should_end) {
4898                                 ret = -EAGAIN;
4899                                 break;
4900                         }
4901                         goto search_again;
4902                 } else {
4903                         path->slots[0]--;
4904                 }
4905         }
4906 out:
4907         if (ret >= 0 && pending_del_nr) {
4908                 int err;
4909
4910                 err = btrfs_del_items(trans, root, path, pending_del_slot,
4911                                       pending_del_nr);
4912                 if (err) {
4913                         btrfs_abort_transaction(trans, err);
4914                         ret = err;
4915                 }
4916         }
4917         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4918                 ASSERT(last_size >= new_size);
4919                 if (!ret && last_size > new_size)
4920                         last_size = new_size;
4921                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4922         }
4923
4924         btrfs_free_path(path);
4925
4926         if (be_nice && bytes_deleted > SZ_32M && (ret >= 0 || ret == -EAGAIN)) {
4927                 unsigned long updates = trans->delayed_ref_updates;
4928                 int err;
4929
4930                 if (updates) {
4931                         trans->delayed_ref_updates = 0;
4932                         err = btrfs_run_delayed_refs(trans, updates * 2);
4933                         if (err)
4934                                 ret = err;
4935                 }
4936         }
4937         return ret;
4938 }
4939
4940 /*
4941  * btrfs_truncate_block - read, zero a chunk and write a block
4942  * @inode - inode that we're zeroing
4943  * @from - the offset to start zeroing
4944  * @len - the length to zero, 0 to zero the entire range relative to the
4945  *      offset
4946  * @front - zero up to the offset instead of from the offset on
4947  *
4948  * This will find the block for the "from" offset, COW the block and zero the
4949  * part we want to zero.  This is used with truncate and hole punching.
4950  */
4951 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4952                         int front)
4953 {
4954         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4955         struct address_space *mapping = inode->i_mapping;
4956         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4957         struct btrfs_ordered_extent *ordered;
4958         struct extent_state *cached_state = NULL;
4959         struct extent_changeset *data_reserved = NULL;
4960         char *kaddr;
4961         u32 blocksize = fs_info->sectorsize;
4962         pgoff_t index = from >> PAGE_SHIFT;
4963         unsigned offset = from & (blocksize - 1);
4964         struct page *page;
4965         gfp_t mask = btrfs_alloc_write_mask(mapping);
4966         int ret = 0;
4967         u64 block_start;
4968         u64 block_end;
4969
4970         if (IS_ALIGNED(offset, blocksize) &&
4971             (!len || IS_ALIGNED(len, blocksize)))
4972                 goto out;
4973
4974         block_start = round_down(from, blocksize);
4975         block_end = block_start + blocksize - 1;
4976
4977         ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
4978                                            block_start, blocksize);
4979         if (ret)
4980                 goto out;
4981
4982 again:
4983         page = find_or_create_page(mapping, index, mask);
4984         if (!page) {
4985                 btrfs_delalloc_release_space(inode, data_reserved,
4986                                              block_start, blocksize, true);
4987                 btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
4988                 ret = -ENOMEM;
4989                 goto out;
4990         }
4991
4992         if (!PageUptodate(page)) {
4993                 ret = btrfs_readpage(NULL, page);
4994                 lock_page(page);
4995                 if (page->mapping != mapping) {
4996                         unlock_page(page);
4997                         put_page(page);
4998                         goto again;
4999                 }
5000                 if (!PageUptodate(page)) {
5001                         ret = -EIO;
5002                         goto out_unlock;
5003                 }
5004         }
5005         wait_on_page_writeback(page);
5006
5007         lock_extent_bits(io_tree, block_start, block_end, &cached_state);
5008         set_page_extent_mapped(page);
5009
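        /*
         * If an ordered extent is still pending for this block, drop our
         * locks, wait for the writeback to finish and retry, so we never
         * zero data that is in flight.
         */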
5010         ordered = btrfs_lookup_ordered_extent(inode, block_start);
5011         if (ordered) {
5012                 unlock_extent_cached(io_tree, block_start, block_end,
5013                                      &cached_state);
5014                 unlock_page(page);
5015                 put_page(page);
5016                 btrfs_start_ordered_extent(inode, ordered, 1);
5017                 btrfs_put_ordered_extent(ordered);
5018                 goto again;
5019         }
5020
5021         clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
5022                           EXTENT_DIRTY | EXTENT_DELALLOC |
5023                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
5024                           0, 0, &cached_state);
5025
5026         ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
5027                                         &cached_state, 0);
5028         if (ret) {
5029                 unlock_extent_cached(io_tree, block_start, block_end,
5030                                      &cached_state);
5031                 goto out_unlock;
5032         }
5033
5034         if (offset != blocksize) {
5035                 if (!len)
5036                         len = blocksize - offset;
5037                 kaddr = kmap(page);
5038                 if (front)
5039                         memset(kaddr + (block_start - page_offset(page)),
5040                                 0, offset);
5041                 else
5042                         memset(kaddr + (block_start - page_offset(page)) +  offset,
5043                                 0, len);
5044                 flush_dcache_page(page);
5045                 kunmap(page);
5046         }
5047         ClearPageChecked(page);
5048         set_page_dirty(page);
5049         unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
5050
5051 out_unlock:
5052         if (ret)
5053                 btrfs_delalloc_release_space(inode, data_reserved, block_start,
5054                                              blocksize, true);
5055         btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
5056         unlock_page(page);
5057         put_page(page);
5058 out:
5059         extent_changeset_free(data_reserved);
5060         return ret;
5061 }
5062
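/*
 * With the NO_HOLES feature there is nothing to insert; just mark the inode
 * as changed so the hole is logged on fsync.  Otherwise drop whatever is in
 * the range and insert an explicit hole file extent item.
 */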
5063 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
5064                              u64 offset, u64 len)
5065 {
5066         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5067         struct btrfs_trans_handle *trans;
5068         int ret;
5069
5070         /*
5071          * Still need to make sure the inode looks like it's been updated so
5072          * that any holes get logged if we fsync.
5073          */
5074         if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
5075                 BTRFS_I(inode)->last_trans = fs_info->generation;
5076                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
5077                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
5078                 return 0;
5079         }
5080
5081         /*
5082          * 1 - for the one we're dropping
5083          * 1 - for the one we're adding
5084          * 1 - for updating the inode.
5085          */
5086         trans = btrfs_start_transaction(root, 3);
5087         if (IS_ERR(trans))
5088                 return PTR_ERR(trans);
5089
5090         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
5091         if (ret) {
5092                 btrfs_abort_transaction(trans, ret);
5093                 btrfs_end_transaction(trans);
5094                 return ret;
5095         }
5096
5097         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
5098                         offset, 0, 0, len, 0, len, 0, 0, 0);
5099         if (ret)
5100                 btrfs_abort_transaction(trans, ret);
5101         else
5102                 btrfs_update_inode(trans, root, inode);
5103         btrfs_end_transaction(trans);
5104         return ret;
5105 }
5106
5107 /*
5108  * This function puts in dummy file extents for the area we're creating a hole
5109  * for.  So if we are truncating this file to a larger size we need to insert
5110  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
5111  * for the range between oldsize and size.
5112  */
5113 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
5114 {
5115         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5116         struct btrfs_root *root = BTRFS_I(inode)->root;
5117         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5118         struct extent_map *em = NULL;
5119         struct extent_state *cached_state = NULL;
5120         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5121         u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5122         u64 block_end = ALIGN(size, fs_info->sectorsize);
5123         u64 last_byte;
5124         u64 cur_offset;
5125         u64 hole_size;
5126         int err = 0;
5127
5128         /*
5129          * If our size started in the middle of a block we need to zero out the
5130          * rest of the block before we expand the i_size, otherwise we could
5131          * expose stale data.
5132          */
5133         err = btrfs_truncate_block(inode, oldsize, 0, 0);
5134         if (err)
5135                 return err;
5136
5137         if (size <= hole_start)
5138                 return 0;
5139
5140         while (1) {
5141                 struct btrfs_ordered_extent *ordered;
5142
5143                 lock_extent_bits(io_tree, hole_start, block_end - 1,
5144                                  &cached_state);
5145                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start,
5146                                                      block_end - hole_start);
5147                 if (!ordered)
5148                         break;
5149                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
5150                                      &cached_state);
5151                 btrfs_start_ordered_extent(inode, ordered, 1);
5152                 btrfs_put_ordered_extent(ordered);
5153         }
5154
5155         cur_offset = hole_start;
5156         while (1) {
5157                 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
5158                                 block_end - cur_offset, 0);
5159                 if (IS_ERR(em)) {
5160                         err = PTR_ERR(em);
5161                         em = NULL;
5162                         break;
5163                 }
5164                 last_byte = min(extent_map_end(em), block_end);
5165                 last_byte = ALIGN(last_byte, fs_info->sectorsize);
5166                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
5167                         struct extent_map *hole_em;
5168                         hole_size = last_byte - cur_offset;
5169
5170                         err = maybe_insert_hole(root, inode, cur_offset,
5171                                                 hole_size);
5172                         if (err)
5173                                 break;
5174                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
5175                                                 cur_offset + hole_size - 1, 0);
5176                         hole_em = alloc_extent_map();
5177                         if (!hole_em) {
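                                /*
                                 * No cached extent map for the hole: force
                                 * a full fsync so the log falls back to the
                                 * on-disk extent items.
                                 */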
5178                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5179                                         &BTRFS_I(inode)->runtime_flags);
5180                                 goto next;
5181                         }
5182                         hole_em->start = cur_offset;
5183                         hole_em->len = hole_size;
5184                         hole_em->orig_start = cur_offset;
5185
5186                         hole_em->block_start = EXTENT_MAP_HOLE;
5187                         hole_em->block_len = 0;
5188                         hole_em->orig_block_len = 0;
5189                         hole_em->ram_bytes = hole_size;
5190                         hole_em->bdev = fs_info->fs_devices->latest_bdev;
5191                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
5192                         hole_em->generation = fs_info->generation;
5193
5194                         while (1) {
5195                                 write_lock(&em_tree->lock);
5196                                 err = add_extent_mapping(em_tree, hole_em, 1);
5197                                 write_unlock(&em_tree->lock);
5198                                 if (err != -EEXIST)
5199                                         break;
5200                                 btrfs_drop_extent_cache(BTRFS_I(inode),
5201                                                         cur_offset,
5202                                                         cur_offset +
5203                                                         hole_size - 1, 0);
5204                         }
5205                         free_extent_map(hole_em);
5206                 }
5207 next:
5208                 free_extent_map(em);
5209                 em = NULL;
5210                 cur_offset = last_byte;
5211                 if (cur_offset >= block_end)
5212                         break;
5213         }
5214         free_extent_map(em);
5215         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
5216         return err;
5217 }
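
/*
 * A worked example of the expansion loop above, assuming a 4K sectorsize:
 * growing a file from i_size 5000 to 20000 gives hole_start = 8192 and
 * block_end = 20480.  btrfs_truncate_block() first zeroes bytes 5000..8191
 * so no stale data can leak out of the tail of the partially used block,
 * and the loop then covers [8192, 20480) with hole extents (plus matching
 * EXTENT_MAP_HOLE entries), skipping any range that is already backed by a
 * preallocated extent.
 */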
5218
5219 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5220 {
5221         struct btrfs_root *root = BTRFS_I(inode)->root;
5222         struct btrfs_trans_handle *trans;
5223         loff_t oldsize = i_size_read(inode);
5224         loff_t newsize = attr->ia_size;
5225         int mask = attr->ia_valid;
5226         int ret;
5227
5228         /*
5229          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5230          * special case where we need to update the times despite not having
5231          * these flags set.  For all other operations the VFS sets these flags
5232          * explicitly if it wants a timestamp update.
5233          */
5234         if (newsize != oldsize) {
5235                 inode_inc_iversion(inode);
5236                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
5237                         inode->i_ctime = inode->i_mtime =
5238                                 current_time(inode);
5239         }
5240
5241         if (newsize > oldsize) {
5242                 /*
5243                  * Don't do an expanding truncate while snapshotting is ongoing.
5244                  * This is to ensure the snapshot captures a fully consistent
5245                  * state of this file - if the snapshot captures this expanding
5246                  * truncation, it must capture all writes that happened before
5247                  * this truncation.
5248                  */
5249                 btrfs_wait_for_snapshot_creation(root);
5250                 ret = btrfs_cont_expand(inode, oldsize, newsize);
5251                 if (ret) {
5252                         btrfs_end_write_no_snapshotting(root);
5253                         return ret;
5254                 }
5255
5256                 trans = btrfs_start_transaction(root, 1);
5257                 if (IS_ERR(trans)) {
5258                         btrfs_end_write_no_snapshotting(root);
5259                         return PTR_ERR(trans);
5260                 }
5261
5262                 i_size_write(inode, newsize);
5263                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
5264                 pagecache_isize_extended(inode, oldsize, newsize);
5265                 ret = btrfs_update_inode(trans, root, inode);
5266                 btrfs_end_write_no_snapshotting(root);
5267                 btrfs_end_transaction(trans);
5268         } else {
5269
5270                 /*
5271                  * We're truncating a file that used to have good data down to
5272                  * zero. Make sure it gets into the ordered flush list so that
5273                  * any new writes get down to disk quickly.
5274                  */
5275                 if (newsize == 0)
5276                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
5277                                 &BTRFS_I(inode)->runtime_flags);
5278
5279                 truncate_setsize(inode, newsize);
5280
5281         /* Disable nonlocked read DIO to avoid an endless truncate */
5282                 btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
5283                 inode_dio_wait(inode);
5284                 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
5285
5286                 ret = btrfs_truncate(inode, newsize == oldsize);
5287                 if (ret && inode->i_nlink) {
5288                         int err;
5289
5290                         /*
5291                          * Truncate failed, so fix up the in-memory size. We
5292                          * adjusted disk_i_size down as we removed extents, so
5293                          * wait for disk_i_size to be stable and then update the
5294                          * in-memory size to match.
5295                          */
5296                         err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5297                         if (err)
5298                                 return err;
5299                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5300                 }
5301         }
5302
5303         return ret;
5304 }
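
/*
 * For example, an ftruncate() that grows a file takes the first branch
 * above: wait out any snapshot creation, fill the new range with holes via
 * btrfs_cont_expand(), then update i_size and the inode item inside a
 * transaction.  A shrinking truncate instead updates the in-memory size
 * with truncate_setsize() first and lets btrfs_truncate() drop the extent
 * items beyond the new size.
 */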
5305
5306 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5307 {
5308         struct inode *inode = d_inode(dentry);
5309         struct btrfs_root *root = BTRFS_I(inode)->root;
5310         int err;
5311
5312         if (btrfs_root_readonly(root))
5313                 return -EROFS;
5314
5315         err = setattr_prepare(dentry, attr);
5316         if (err)
5317                 return err;
5318
5319         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5320                 err = btrfs_setsize(inode, attr);
5321                 if (err)
5322                         return err;
5323         }
5324
5325         if (attr->ia_valid) {
5326                 setattr_copy(inode, attr);
5327                 inode_inc_iversion(inode);
5328                 err = btrfs_dirty_inode(inode);
5329
5330                 if (!err && attr->ia_valid & ATTR_MODE)
5331                         err = posix_acl_chmod(inode, inode->i_mode);
5332         }
5333
5334         return err;
5335 }
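
/*
 * Example call flows into btrfs_setattr() above: truncate(2) on a regular
 * file arrives with ATTR_SIZE set and is routed through btrfs_setsize(),
 * while chmod(2) arrives with ATTR_MODE, so setattr_copy() updates i_mode,
 * the inode is dirtied, and posix_acl_chmod() adjusts any cached ACL to
 * match the new mode bits.
 */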
5336
5337 /*
5338  * While truncating the inode pages during eviction, we get the VFS calling
5339  * btrfs_invalidatepage() against each page of the inode. This is slow because
5340  * the calls to btrfs_invalidatepage() result in a huge number of calls to
5341  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5342  * extent_state structures over and over, wasting lots of time.
5343  *
5344  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5345  * those expensive operations on a per-page basis and do only the ordered io
5346  * finishing, while we release here the extent_map and extent_state structures,
5347  * without the excessive merging and splitting.
5348  */
5349 static void evict_inode_truncate_pages(struct inode *inode)
5350 {
5351         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5352         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5353         struct rb_node *node;
5354
5355         ASSERT(inode->i_state & I_FREEING);
5356         truncate_inode_pages_final(&inode->i_data);
5357
5358         write_lock(&map_tree->lock);
5359         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5360                 struct extent_map *em;
5361
5362                 node = rb_first(&map_tree->map);
5363                 em = rb_entry(node, struct extent_map, rb_node);
5364                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5365                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5366                 remove_extent_mapping(map_tree, em);
5367                 free_extent_map(em);
5368                 if (need_resched()) {
5369                         write_unlock(&map_tree->lock);
5370                         cond_resched();
5371                         write_lock(&map_tree->lock);
5372                 }
5373         }
5374         write_unlock(&map_tree->lock);
5375
5376         /*
5377          * Keep looping until we have no more ranges in the io tree.
5378          * We can have ongoing bios started by readpages (called from readahead)
5379          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5380          * still in progress (they have unlocked the pages in the bio but have
5381          * not yet unlocked the ranges in the io tree). This means some ranges
5382          * can still be locked when eviction starts, because no inode reference
5383          * (inode->i_count) was taken before submitting those bios, which are
5384          * executed by a separate task (a work queue kthread); such a reference
5385          * would have been dropped in the end io callback of each bio.
5386          * Therefore here we effectively end up waiting for those bios and for
5387          * anyone else holding locked ranges without having bumped the inode's
5388          * reference count - if we don't do it, when they access the inode's
5389          * io_tree to unlock a range it may be too late, leading to a
5390          * use-after-free issue.
5391          */
5392         spin_lock(&io_tree->lock);
5393         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5394                 struct extent_state *state;
5395                 struct extent_state *cached_state = NULL;
5396                 u64 start;
5397                 u64 end;
5398                 unsigned state_flags;
5399
5400                 node = rb_first(&io_tree->state);
5401                 state = rb_entry(node, struct extent_state, rb_node);
5402                 start = state->start;
5403                 end = state->end;
5404                 state_flags = state->state;
5405                 spin_unlock(&io_tree->lock);
5406
5407                 lock_extent_bits(io_tree, start, end, &cached_state);
5408
5409                 /*
5410                  * If the range still has the DELALLOC flag, its data never
5411                  * reached disk, so its reserved space won't be freed by a
5412                  * delayed ref; we need to free that reserved space here.
5413                  * (Refer to the comment in btrfs_invalidatepage, case 2)
5414                  *
5415                  * Note: end is the bytenr of the last byte, hence the + 1.
5416                  */
5417                 if (state_flags & EXTENT_DELALLOC)
5418                         btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
5419
5420                 clear_extent_bit(io_tree, start, end,
5421                                  EXTENT_LOCKED | EXTENT_DIRTY |
5422                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5423                                  EXTENT_DEFRAG, 1, 1, &cached_state);
5424
5425                 cond_resched();
5426                 spin_lock(&io_tree->lock);
5427         }
5428         spin_unlock(&io_tree->lock);
5429 }
5430
5431 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5432                                                         struct btrfs_block_rsv *rsv,
5433                                                         u64 min_size)
5434 {
5435         struct btrfs_fs_info *fs_info = root->fs_info;
5436         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5437         int failures = 0;
5438
5439         for (;;) {
5440                 struct btrfs_trans_handle *trans;
5441                 int ret;
5442
5443                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5444                                              BTRFS_RESERVE_FLUSH_LIMIT);
5445
5446                 if (ret && ++failures > 2) {
5447                         btrfs_warn(fs_info,
5448                                    "could not allocate space for a delete; will truncate on mount");
5449                         return ERR_PTR(-ENOSPC);
5450                 }
5451
5452                 trans = btrfs_join_transaction(root);
5453                 if (IS_ERR(trans) || !ret)
5454                         return trans;
5455
5456                 /*
5457                  * Try to steal from the global reserve if there is space for
5458                  * it.
5459                  */
5460                 if (!btrfs_check_space_for_delayed_refs(trans, fs_info) &&
5461                     !btrfs_block_rsv_migrate(global_rsv, rsv, min_size, 0))
5462                         return trans;
5463
5464                 /* If not, commit and try again. */
5465                 ret = btrfs_commit_transaction(trans);
5466                 if (ret)
5467                         return ERR_PTR(ret);
5468         }
5469 }
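
/*
 * The loop above tries, in order: refill the temporary reservation with
 * the FLUSH_LIMIT strategy, join a transaction (good enough if the refill
 * succeeded), steal min_size from the global reserve, and finally commit
 * the transaction and retry.  After the third failed refill it gives up
 * with -ENOSPC and, as the warning says, leaves the truncation to be
 * finished on the next mount.
 */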
5470
5471 void btrfs_evict_inode(struct inode *inode)
5472 {
5473         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5474         struct btrfs_trans_handle *trans;
5475         struct btrfs_root *root = BTRFS_I(inode)->root;
5476         struct btrfs_block_rsv *rsv;
5477         u64 min_size;
5478         int ret;
5479
5480         trace_btrfs_inode_evict(inode);
5481
5482         if (!root) {
5483                 clear_inode(inode);
5484                 return;
5485         }
5486
5487         min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
5488
5489         evict_inode_truncate_pages(inode);
5490
5491         if (inode->i_nlink &&
5492             ((btrfs_root_refs(&root->root_item) != 0 &&
5493               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5494              btrfs_is_free_space_inode(BTRFS_I(inode))))
5495                 goto no_delete;
5496
5497         if (is_bad_inode(inode))
5498                 goto no_delete;
5499         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5500         if (!special_file(inode->i_mode))
5501                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5502
5503         btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5504
5505         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5506                 goto no_delete;
5507
5508         if (inode->i_nlink > 0) {
5509                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5510                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5511                 goto no_delete;
5512         }
5513
5514         ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5515         if (ret)
5516                 goto no_delete;
5517
5518         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5519         if (!rsv)
5520                 goto no_delete;
5521         rsv->size = min_size;
5522         rsv->failfast = 1;
5523
5524         btrfs_i_size_write(BTRFS_I(inode), 0);
5525
5526         while (1) {
5527                 trans = evict_refill_and_join(root, rsv, min_size);
5528                 if (IS_ERR(trans))
5529                         goto free_rsv;
5530
5531                 trans->block_rsv = rsv;
5532
5533                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5534                 trans->block_rsv = &fs_info->trans_block_rsv;
5535                 btrfs_end_transaction(trans);
5536                 btrfs_btree_balance_dirty(fs_info);
5537                 if (ret && ret != -ENOSPC && ret != -EAGAIN)
5538                         goto free_rsv;
5539                 else if (!ret)
5540                         break;
5541         }
5542
5543         /*
5544          * Errors here aren't a big deal, it just means we leave orphan items in
5545          * the tree. They will be cleaned up on the next mount. If the inode
5546          * number gets reused, cleanup deletes the orphan item without doing
5547          * anything, and unlink reuses the existing orphan item.
5548          *
5549          * If it turns out that we are dropping too many of these, we might want
5550          * to add a mechanism for retrying these after a commit.
5551          */
5552         trans = evict_refill_and_join(root, rsv, min_size);
5553         if (!IS_ERR(trans)) {
5554                 trans->block_rsv = rsv;
5555                 btrfs_orphan_del(trans, BTRFS_I(inode));
5556                 trans->block_rsv = &fs_info->trans_block_rsv;
5557                 btrfs_end_transaction(trans);
5558         }
5559
5560         if (!(root == fs_info->tree_root ||
5561               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5562                 btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));
5563
5564 free_rsv:
5565         btrfs_free_block_rsv(fs_info, rsv);
5566 no_delete:
5567         /*
5568          * If we didn't successfully delete, the orphan item will still be in
5569          * the tree and we'll retry on the next mount. Again, we might also want
5570          * to retry these periodically in the future.
5571          */
5572         btrfs_remove_delayed_node(BTRFS_I(inode));
5573         clear_inode(inode);
5574 }
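
/*
 * In short, evicting a deleted inode above runs: truncate the page cache,
 * flush the delayed inode item, then loop (reserve space, drop a batch of
 * items with btrfs_truncate_inode_items(), end the transaction) until the
 * truncation completes, and finally remove the orphan item that kept the
 * whole operation replayable across crashes.
 */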
5575
5576 /*
5577  * Return the key found in the dir entry in the location pointer, fill @type
5578  * with BTRFS_FT_*, and return 0.
5579  *
5580  * If no dir entries were found, returns -ENOENT.
5581  * If a corrupted location is found in the dir entry, returns -EUCLEAN.
5582  */
5583 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5584                                struct btrfs_key *location, u8 *type)
5585 {
5586         const char *name = dentry->d_name.name;
5587         int namelen = dentry->d_name.len;
5588         struct btrfs_dir_item *di;
5589         struct btrfs_path *path;
5590         struct btrfs_root *root = BTRFS_I(dir)->root;
5591         int ret = 0;
5592
5593         path = btrfs_alloc_path();
5594         if (!path)
5595                 return -ENOMEM;
5596
5597         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5598                         name, namelen, 0);
5599         if (!di) {
5600                 ret = -ENOENT;
5601                 goto out;
5602         }
5603         if (IS_ERR(di)) {
5604                 ret = PTR_ERR(di);
5605                 goto out;
5606         }
5607
5608         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5609         if (location->type != BTRFS_INODE_ITEM_KEY &&
5610             location->type != BTRFS_ROOT_ITEM_KEY) {
5611                 ret = -EUCLEAN;
5612                 btrfs_warn(root->fs_info,
5613 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5614                            __func__, name, btrfs_ino(BTRFS_I(dir)),
5615                            location->objectid, location->type, location->offset);
5616         }
5617         if (!ret)
5618                 *type = btrfs_dir_type(path->nodes[0], di);
5619 out:
5620         btrfs_free_path(path);
5621         return ret;
5622 }
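
/*
 * Example: looking up the name "foo" above resolves its DIR_ITEM in the
 * directory and copies out the location key it points to.  A plain file or
 * directory yields (ino, BTRFS_INODE_ITEM_KEY, 0); a subvolume entry
 * yields a BTRFS_ROOT_ITEM_KEY, which btrfs_lookup_dentry() treats as a
 * mount-point-like crossing via fixup_tree_root_location().
 */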
5623
5624 /*
5625  * when we hit a tree root in a directory, the btrfs part of the inode
5626  * needs to be changed to reflect the root directory of the tree root.  This
5627  * is kind of like crossing a mount point.
5628  */
5629 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5630                                     struct inode *dir,
5631                                     struct dentry *dentry,
5632                                     struct btrfs_key *location,
5633                                     struct btrfs_root **sub_root)
5634 {
5635         struct btrfs_path *path;
5636         struct btrfs_root *new_root;
5637         struct btrfs_root_ref *ref;
5638         struct extent_buffer *leaf;
5639         struct btrfs_key key;
5640         int ret;
5641         int err = 0;
5642
5643         path = btrfs_alloc_path();
5644         if (!path) {
5645                 err = -ENOMEM;
5646                 goto out;
5647         }
5648
5649         err = -ENOENT;
5650         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5651         key.type = BTRFS_ROOT_REF_KEY;
5652         key.offset = location->objectid;
5653
5654         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5655         if (ret) {
5656                 if (ret < 0)
5657                         err = ret;
5658                 goto out;
5659         }
5660
5661         leaf = path->nodes[0];
5662         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5663         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5664             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5665                 goto out;
5666
5667         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5668                                    (unsigned long)(ref + 1),
5669                                    dentry->d_name.len);
5670         if (ret)
5671                 goto out;
5672
5673         btrfs_release_path(path);
5674
5675         new_root = btrfs_read_fs_root_no_name(fs_info, location);
5676         if (IS_ERR(new_root)) {
5677                 err = PTR_ERR(new_root);
5678                 goto out;
5679         }
5680
5681         *sub_root = new_root;
5682         location->objectid = btrfs_root_dirid(&new_root->root_item);
5683         location->type = BTRFS_INODE_ITEM_KEY;
5684         location->offset = 0;
5685         err = 0;
5686 out:
5687         btrfs_free_path(path);
5688         return err;
5689 }
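
/*
 * Example: if directory inode 257 in the top-level subvolume (tree id 5)
 * contains a subvolume "snap" whose tree id is 300, the root tree holds a
 * (5, BTRFS_ROOT_REF_KEY, 300) item recording dirid 257 and the name
 * "snap".  The code above verifies that back reference, then rewrites
 * @location to point at the subvolume's own root directory inode so the
 * path walk continues inside the new root.
 */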
5690
5691 static void inode_tree_add(struct inode *inode)
5692 {
5693         struct btrfs_root *root = BTRFS_I(inode)->root;
5694         struct btrfs_inode *entry;
5695         struct rb_node **p;
5696         struct rb_node *parent;
5697         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5698         u64 ino = btrfs_ino(BTRFS_I(inode));
5699
5700         if (inode_unhashed(inode))
5701                 return;
5702         parent = NULL;
5703         spin_lock(&root->inode_lock);
5704         p = &root->inode_tree.rb_node;
5705         while (*p) {
5706                 parent = *p;
5707                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5708
5709                 if (ino < btrfs_ino(entry))
5710                         p = &parent->rb_left;
5711                 else if (ino > btrfs_ino(entry))
5712                         p = &parent->rb_right;
5713                 else {
5714                         WARN_ON(!(entry->vfs_inode.i_state &
5715                                   (I_WILL_FREE | I_FREEING)));
5716                         rb_replace_node(parent, new, &root->inode_tree);
5717                         RB_CLEAR_NODE(parent);
5718                         spin_unlock(&root->inode_lock);
5719                         return;
5720                 }
5721         }
5722         rb_link_node(new, parent, p);
5723         rb_insert_color(new, &root->inode_tree);
5724         spin_unlock(&root->inode_lock);
5725 }
5726
5727 static void inode_tree_del(struct inode *inode)
5728 {
5729         struct btrfs_root *root = BTRFS_I(inode)->root;
5730         int empty = 0;
5731
5732         spin_lock(&root->inode_lock);
5733         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5734                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5735                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5736                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5737         }
5738         spin_unlock(&root->inode_lock);
5739
5740         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5741                 spin_lock(&root->inode_lock);
5742                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5743                 spin_unlock(&root->inode_lock);
5744                 if (empty)
5745                         btrfs_add_dead_root(root);
5746         }
5747 }
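
/*
 * Note on the recheck above: inodes of a root are linked into
 * root->inode_tree ordered by inode number.  When the tree becomes empty
 * and the root itself has no references left (a deleted subvolume), the
 * emptiness is re-tested under inode_lock before the root is queued for
 * cleanup with btrfs_add_dead_root().
 */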
5748
5749
5750 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5751 {
5752         struct btrfs_iget_args *args = p;
5753         inode->i_ino = args->location->objectid;
5754         memcpy(&BTRFS_I(inode)->location, args->location,
5755                sizeof(*args->location));
5756         BTRFS_I(inode)->root = args->root;
5757         return 0;
5758 }
5759
5760 static int btrfs_find_actor(struct inode *inode, void *opaque)
5761 {
5762         struct btrfs_iget_args *args = opaque;
5763         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5764                 args->root == BTRFS_I(inode)->root;
5765 }
5766
5767 static struct inode *btrfs_iget_locked(struct super_block *s,
5768                                        struct btrfs_key *location,
5769                                        struct btrfs_root *root)
5770 {
5771         struct inode *inode;
5772         struct btrfs_iget_args args;
5773         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5774
5775         args.location = location;
5776         args.root = root;
5777
5778         inode = iget5_locked(s, hashval, btrfs_find_actor,
5779                              btrfs_init_locked_inode,
5780                              (void *)&args);
5781         return inode;
5782 }
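
/*
 * iget5_locked() either returns an existing inode whose (objectid, root)
 * pair matches via btrfs_find_actor(), or allocates a fresh one seeded by
 * btrfs_init_locked_inode().  A fresh inode comes back with I_NEW set,
 * which btrfs_iget_path() below uses to decide whether the inode item
 * still has to be read from disk.
 */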
5783
5784 /* Get an inode object given its location and corresponding root.
5785  * Sets *new to 1 if the inode had to be read from disk.
5786  */
5787 struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
5788                               struct btrfs_root *root, int *new,
5789                               struct btrfs_path *path)
5790 {
5791         struct inode *inode;
5792
5793         inode = btrfs_iget_locked(s, location, root);
5794         if (!inode)
5795                 return ERR_PTR(-ENOMEM);
5796
5797         if (inode->i_state & I_NEW) {
5798                 int ret;
5799
5800                 ret = btrfs_read_locked_inode(inode, path);
5801                 if (!ret) {
5802                         inode_tree_add(inode);
5803                         unlock_new_inode(inode);
5804                         if (new)
5805                                 *new = 1;
5806                 } else {
5807                         iget_failed(inode);
5808                         /*
5809                          * ret > 0 can come from btrfs_search_slot called by
5810                          * btrfs_read_locked_inode, this means the inode item
5811                          * was not found.
5812                          */
5813                         if (ret > 0)
5814                                 ret = -ENOENT;
5815                         inode = ERR_PTR(ret);
5816                 }
5817         }
5818
5819         return inode;
5820 }
5821
5822 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5823                          struct btrfs_root *root, int *new)
5824 {
5825         return btrfs_iget_path(s, location, root, new, NULL);
5826 }
5827
5828 static struct inode *new_simple_dir(struct super_block *s,
5829                                     struct btrfs_key *key,
5830                                     struct btrfs_root *root)
5831 {
5832         struct inode *inode = new_inode(s);
5833
5834         if (!inode)
5835                 return ERR_PTR(-ENOMEM);
5836
5837         BTRFS_I(inode)->root = root;
5838         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5839         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5840
5841         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5842         inode->i_op = &btrfs_dir_ro_inode_operations;
5843         inode->i_opflags &= ~IOP_XATTR;
5844         inode->i_fop = &simple_dir_operations;
5845         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5846         inode->i_mtime = current_time(inode);
5847         inode->i_atime = inode->i_mtime;
5848         inode->i_ctime = inode->i_mtime;
5849         BTRFS_I(inode)->i_otime = inode->i_mtime;
5850
5851         return inode;
5852 }
5853
5854 static inline u8 btrfs_inode_type(struct inode *inode)
5855 {
5856         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
5857 }
5858
5859 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5860 {
5861         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5862         struct inode *inode;
5863         struct btrfs_root *root = BTRFS_I(dir)->root;
5864         struct btrfs_root *sub_root = root;
5865         struct btrfs_key location;
5866         u8 di_type = 0;
5867         int index;
5868         int ret = 0;
5869
5870         if (dentry->d_name.len > BTRFS_NAME_LEN)
5871                 return ERR_PTR(-ENAMETOOLONG);
5872
5873         ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
5874         if (ret < 0)
5875                 return ERR_PTR(ret);
5876
5877         if (location.type == BTRFS_INODE_ITEM_KEY) {
5878                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5879                 if (IS_ERR(inode))
5880                         return inode;
5881
5882                 /* Do extra check against inode mode with di_type */
5883                 if (btrfs_inode_type(inode) != di_type) {
5884                         btrfs_crit(fs_info,
5885 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5886                                   inode->i_mode, btrfs_inode_type(inode),
5887                                   di_type);
5888                         iput(inode);
5889                         return ERR_PTR(-EUCLEAN);
5890                 }
5891                 return inode;
5892         }
5893
5894         index = srcu_read_lock(&fs_info->subvol_srcu);
5895         ret = fixup_tree_root_location(fs_info, dir, dentry,
5896                                        &location, &sub_root);
5897         if (ret < 0) {
5898                 if (ret != -ENOENT)
5899                         inode = ERR_PTR(ret);
5900                 else
5901                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5902         } else {
5903                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5904         }
5905         srcu_read_unlock(&fs_info->subvol_srcu, index);
5906
5907         if (!IS_ERR(inode) && root != sub_root) {
5908                 down_read(&fs_info->cleanup_work_sem);
5909                 if (!sb_rdonly(inode->i_sb))
5910                         ret = btrfs_orphan_cleanup(sub_root);
5911                 up_read(&fs_info->cleanup_work_sem);
5912                 if (ret) {
5913                         iput(inode);
5914                         inode = ERR_PTR(ret);
5915                 }
5916         }
5917
5918         return inode;
5919 }
5920
5921 static int btrfs_dentry_delete(const struct dentry *dentry)
5922 {
5923         struct btrfs_root *root;
5924         struct inode *inode = d_inode(dentry);
5925
5926         if (!inode && !IS_ROOT(dentry))
5927                 inode = d_inode(dentry->d_parent);
5928
5929         if (inode) {
5930                 root = BTRFS_I(inode)->root;
5931                 if (btrfs_root_refs(&root->root_item) == 0)
5932                         return 1;
5933
5934                 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5935                         return 1;
5936         }
5937         return 0;
5938 }
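
/*
 * Returning 1 above tells the VFS to drop the dentry instead of caching
 * it: either the whole subvolume is gone (zero root refs), or the inode is
 * the dummy BTRFS_EMPTY_SUBVOL_DIR_OBJECTID directory that new_simple_dir()
 * creates when a subvolume cannot be resolved.
 */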
5939
5940 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5941                                    unsigned int flags)
5942 {
5943         struct inode *inode;
5944
5945         inode = btrfs_lookup_dentry(dir, dentry);
5946         if (IS_ERR(inode)) {
5947                 if (PTR_ERR(inode) == -ENOENT)
5948                         inode = NULL;
5949                 else
5950                         return ERR_CAST(inode);
5951         }
5952
5953         return d_splice_alias(inode, dentry);
5954 }
5955
5956 unsigned char btrfs_filetype_table[] = {
5957         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5958 };
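
/*
 * The table above is indexed by the BTRFS_FT_* value stored in a dir item
 * and yields the DT_* constant reported to userspace by readdir; for
 * example BTRFS_FT_DIR maps to DT_DIR and BTRFS_FT_SYMLINK to DT_LNK.
 */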
5959
5960 /*
5961  * All this infrastructure exists because dir_emit can fault, and we are holding
5962  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5963  * our information into that, and then dir_emit from the buffer.  This is
5964  * similar to what NFS does, only we don't keep the buffer around in pagecache
5965  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5966  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5967  * tree lock.
5968  */
5969 static int btrfs_opendir(struct inode *inode, struct file *file)
5970 {
5971         struct btrfs_file_private *private;
5972
5973         private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5974         if (!private)
5975                 return -ENOMEM;
5976         private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5977         if (!private->filldir_buf) {
5978                 kfree(private);
5979                 return -ENOMEM;
5980         }
5981         file->private_data = private;
5982         return 0;
5983 }
5984
5985 struct dir_entry {
5986         u64 ino;
5987         u64 offset;
5988         unsigned type;
5989         int name_len;
5990 };
5991
5992 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5993 {
5994         while (entries--) {
5995                 struct dir_entry *entry = addr;
5996                 char *name = (char *)(entry + 1);
5997
5998                 ctx->pos = get_unaligned(&entry->offset);
5999                 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
6000                                          get_unaligned(&entry->ino),
6001                                          get_unaligned(&entry->type)))
6002                         return 1;
6003                 addr += sizeof(struct dir_entry) +
6004                         get_unaligned(&entry->name_len);
6005                 ctx->pos++;
6006         }
6007         return 0;
6008 }
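
/*
 * Layout of the filldir_buf consumed above: a packed run of struct
 * dir_entry headers, each immediately followed by its (unterminated) name
 * bytes, e.g.
 *
 *   [dir_entry]["foo"][dir_entry]["barbaz"]...
 *
 * which is why the cursor advances by sizeof(struct dir_entry) + name_len,
 * and why all fields are accessed with get_unaligned().
 */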
6009
6010 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
6011 {
6012         struct inode *inode = file_inode(file);
6013         struct btrfs_root *root = BTRFS_I(inode)->root;
6014         struct btrfs_file_private *private = file->private_data;
6015         struct btrfs_dir_item *di;
6016         struct btrfs_key key;
6017         struct btrfs_key found_key;
6018         struct btrfs_path *path;
6019         void *addr;
6020         struct list_head ins_list;
6021         struct list_head del_list;
6022         int ret;
6023         struct extent_buffer *leaf;
6024         int slot;
6025         char *name_ptr;
6026         int name_len;
6027         int entries = 0;
6028         int total_len = 0;
6029         bool put = false;
6030         struct btrfs_key location;
6031
6032         if (!dir_emit_dots(file, ctx))
6033                 return 0;
6034
6035         path = btrfs_alloc_path();
6036         if (!path)
6037                 return -ENOMEM;
6038
6039         addr = private->filldir_buf;
6040         path->reada = READA_FORWARD;
6041
6042         INIT_LIST_HEAD(&ins_list);
6043         INIT_LIST_HEAD(&del_list);
6044         put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
6045
6046 again:
6047         key.type = BTRFS_DIR_INDEX_KEY;
6048         key.offset = ctx->pos;
6049         key.objectid = btrfs_ino(BTRFS_I(inode));
6050
6051         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6052         if (ret < 0)
6053                 goto err;
6054
6055         while (1) {
6056                 struct dir_entry *entry;
6057
6058                 leaf = path->nodes[0];
6059                 slot = path->slots[0];
6060                 if (slot >= btrfs_header_nritems(leaf)) {
6061                         ret = btrfs_next_leaf(root, path);
6062                         if (ret < 0)
6063                                 goto err;
6064                         else if (ret > 0)
6065                                 break;
6066                         continue;
6067                 }
6068
6069                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6070
6071                 if (found_key.objectid != key.objectid)
6072                         break;
6073                 if (found_key.type != BTRFS_DIR_INDEX_KEY)
6074                         break;
6075                 if (found_key.offset < ctx->pos)
6076                         goto next;
6077                 if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
6078                         goto next;
6079                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
6080                 name_len = btrfs_dir_name_len(leaf, di);
6081                 if ((total_len + sizeof(struct dir_entry) + name_len) >=
6082                     PAGE_SIZE) {
6083                         btrfs_release_path(path);
6084                         ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6085                         if (ret)
6086                                 goto nopos;
6087                         addr = private->filldir_buf;
6088                         entries = 0;
6089                         total_len = 0;
6090                         goto again;
6091                 }
6092
6093                 entry = addr;
6094                 put_unaligned(name_len, &entry->name_len);
6095                 name_ptr = (char *)(entry + 1);
6096                 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
6097                                    name_len);
6098                 put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
6099                                 &entry->type);
6100                 btrfs_dir_item_key_to_cpu(leaf, di, &location);
6101                 put_unaligned(location.objectid, &entry->ino);
6102                 put_unaligned(found_key.offset, &entry->offset);
6103                 entries++;
6104                 addr += sizeof(struct dir_entry) + name_len;
6105                 total_len += sizeof(struct dir_entry) + name_len;
6106 next:
6107                 path->slots[0]++;
6108         }
6109         btrfs_release_path(path);
6110
6111         ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6112         if (ret)
6113                 goto nopos;
6114
6115         ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
6116         if (ret)
6117                 goto nopos;
6118
6119         /*
6120          * Stop new entries from being returned after we return the last
6121          * entry.
6122          *
6123          * New directory entries are assigned a strictly increasing
6124          * offset.  This means that new entries created during readdir
6125          * are *guaranteed* to be seen in the future by that readdir.
6126          * This has broken buggy programs which operate on names as
6127          * they're returned by readdir.  Until we re-use freed offsets
6128          * we have this hack to stop new entries from being returned
6129          * under the assumption that they'll never reach this huge
6130          * offset.
6131          *
6132          * This is being careful not to overflow 32bit loff_t unless the
6133          * last entry requires it because doing so has broken 32bit apps
6134          * in the past.
6135          */
6136         if (ctx->pos >= INT_MAX)
6137                 ctx->pos = LLONG_MAX;
6138         else
6139                 ctx->pos = INT_MAX;
6140 nopos:
6141         ret = 0;
6142 err:
6143         if (put)
6144                 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
6145         btrfs_free_path(path);
6146         return ret;
6147 }
6148
6149 /*
6150  * This is somewhat expensive, since it updates the tree every time the
6151  * inode changes.  But the inode is most likely to be found in cache.
6152  * FIXME: needs more benchmarking; there is no reason other than
6153  * performance to keep or drop this code.
6154  */
6155 static int btrfs_dirty_inode(struct inode *inode)
6156 {
6157         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6158         struct btrfs_root *root = BTRFS_I(inode)->root;
6159         struct btrfs_trans_handle *trans;
6160         int ret;
6161
6162         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6163                 return 0;
6164
6165         trans = btrfs_join_transaction(root);
6166         if (IS_ERR(trans))
6167                 return PTR_ERR(trans);
6168
6169         ret = btrfs_update_inode(trans, root, inode);
6170         if (ret == -ENOSPC) {
6171                 /* whoops, let's try again with the full transaction */
6172                 btrfs_end_transaction(trans);
6173                 trans = btrfs_start_transaction(root, 1);
6174                 if (IS_ERR(trans))
6175                         return PTR_ERR(trans);
6176
6177                 ret = btrfs_update_inode(trans, root, inode);
6178         }
6179         btrfs_end_transaction(trans);
6180         if (BTRFS_I(inode)->delayed_node)
6181                 btrfs_balance_delayed_items(fs_info);
6182
6183         return ret;
6184 }
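
/*
 * Note the fallback above: btrfs_join_transaction() piggybacks on a
 * running transaction without reserving metadata space, so if the inode
 * update fails with -ENOSPC we retry with btrfs_start_transaction(root, 1),
 * which explicitly reserves space for the one item update.
 */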
6185
6186 /*
6187  * This is a copy of file_update_time.  We need it so we can return an error
6188  * on ENOSPC when updating the inode during file writes and mmap writes.
6189  */
6190 static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
6191                              int flags)
6192 {
6193         struct btrfs_root *root = BTRFS_I(inode)->root;
6194         bool dirty = flags & ~S_VERSION;
6195
6196         if (btrfs_root_readonly(root))
6197                 return -EROFS;
6198
6199         if (flags & S_VERSION)
6200                 dirty |= inode_maybe_inc_iversion(inode, dirty);
6201         if (flags & S_CTIME)
6202                 inode->i_ctime = *now;
6203         if (flags & S_MTIME)
6204                 inode->i_mtime = *now;
6205         if (flags & S_ATIME)
6206                 inode->i_atime = *now;
6207         return dirty ? btrfs_dirty_inode(inode) : 0;
6208 }
6209
6210 /*
6211  * Find the highest existing sequence number in a directory and then
6212  * set the in-memory index_cnt variable to point at the next free
6213  * sequence number.
6214  */
6215 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6216 {
6217         struct btrfs_root *root = inode->root;
6218         struct btrfs_key key, found_key;
6219         struct btrfs_path *path;
6220         struct extent_buffer *leaf;
6221         int ret;
6222
6223         key.objectid = btrfs_ino(inode);
6224         key.type = BTRFS_DIR_INDEX_KEY;
6225         key.offset = (u64)-1;
6226
6227         path = btrfs_alloc_path();
6228         if (!path)
6229                 return -ENOMEM;
6230
6231         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6232         if (ret < 0)
6233                 goto out;
6234         /* FIXME: we should be able to handle this */
6235         if (ret == 0)
6236                 goto out;
6237         ret = 0;
6238
6239         /*
6240          * MAGIC NUMBER EXPLANATION:
6241          * Since we search a directory based on f_pos, and '.' and '..' have
6242          * f_pos of 0 and 1 respectively, every other entry has to start at
6243          * index 2.
6244          */
6245         if (path->slots[0] == 0) {
6246                 inode->index_cnt = 2;
6247                 goto out;
6248         }
6249
6250         path->slots[0]--;
6251
6252         leaf = path->nodes[0];
6253         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6254
6255         if (found_key.objectid != btrfs_ino(inode) ||
6256             found_key.type != BTRFS_DIR_INDEX_KEY) {
6257                 inode->index_cnt = 2;
6258                 goto out;
6259         }
6260
6261         inode->index_cnt = found_key.offset + 1;
6262 out:
6263         btrfs_free_path(path);
6264         return ret;
6265 }
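
/*
 * Example: if the highest DIR_INDEX item found above is
 * (ino, BTRFS_DIR_INDEX_KEY, 7), index_cnt becomes 8.  A directory with no
 * DIR_INDEX items at all starts at 2, since offsets 0 and 1 are taken by
 * '.' and '..'.
 */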
6266
6267 /*
6268  * Helper to find a free sequence number in a given directory.  The current
6269  * code is very simple; later versions will do smarter things in the btree.
6270  */
6271 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6272 {
6273         int ret = 0;
6274
6275         if (dir->index_cnt == (u64)-1) {
6276                 ret = btrfs_inode_delayed_dir_index_count(dir);
6277                 if (ret) {
6278                         ret = btrfs_set_inode_index_count(dir);
6279                         if (ret)
6280                                 return ret;
6281                 }
6282         }
6283
6284         *index = dir->index_cnt;
6285         dir->index_cnt++;
6286
6287         return ret;
6288 }
6289
6290 static int btrfs_insert_inode_locked(struct inode *inode)
6291 {
6292         struct btrfs_iget_args args;
6293         args.location = &BTRFS_I(inode)->location;
6294         args.root = BTRFS_I(inode)->root;
6295
6296         return insert_inode_locked4(inode,
6297                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6298                    btrfs_find_actor, &args);
6299 }
6300
6301 /*
6302  * Inherit flags from the parent inode.
6303  *
6304  * Currently only the compression flags and the cow flags are inherited.
6305  */
6306 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
6307 {
6308         unsigned int flags;
6309
6310         if (!dir)
6311                 return;
6312
6313         flags = BTRFS_I(dir)->flags;
6314
6315         if (flags & BTRFS_INODE_NOCOMPRESS) {
6316                 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
6317                 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
6318         } else if (flags & BTRFS_INODE_COMPRESS) {
6319                 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
6320                 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
6321         }
6322
6323         if (flags & BTRFS_INODE_NODATACOW) {
6324                 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
6325                 if (S_ISREG(inode->i_mode))
6326                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6327         }
6328
6329         btrfs_sync_inode_flags_to_i_flags(inode);
6330 }
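
/*
 * Example: a regular file created in a directory carrying NODATACOW
 * (chattr +C) inherits both NODATACOW and NODATASUM above, since data
 * that is overwritten in place cannot keep its checksums consistent.
 */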
6331
6332 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6333                                      struct btrfs_root *root,
6334                                      struct inode *dir,
6335                                      const char *name, int name_len,
6336                                      u64 ref_objectid, u64 objectid,
6337                                      umode_t mode, u64 *index)
6338 {
6339         struct btrfs_fs_info *fs_info = root->fs_info;
6340         struct inode *inode;
6341         struct btrfs_inode_item *inode_item;
6342         struct btrfs_key *location;
6343         struct btrfs_path *path;
6344         struct btrfs_inode_ref *ref;
6345         struct btrfs_key key[2];
6346         u32 sizes[2];
6347         int nitems = name ? 2 : 1;
6348         unsigned long ptr;
6349         int ret;
6350
6351         path = btrfs_alloc_path();
6352         if (!path)
6353                 return ERR_PTR(-ENOMEM);
6354
6355         inode = new_inode(fs_info->sb);
6356         if (!inode) {
6357                 btrfs_free_path(path);
6358                 return ERR_PTR(-ENOMEM);
6359         }
6360
6361         /*
6362          * For O_TMPFILE (no name given), set the link count to 0 so that
6363          * after this point we fill in an inode item with the correct link count.
6364          */
6365         if (!name)
6366                 set_nlink(inode, 0);
6367
6368         /*
6369          * we have to initialize this early, so we can reclaim the inode
6370          * number if we fail afterwards in this function.
6371          */
6372         inode->i_ino = objectid;
6373
6374         if (dir && name) {
6375                 trace_btrfs_inode_request(dir);
6376
6377                 ret = btrfs_set_inode_index(BTRFS_I(dir), index);
6378                 if (ret) {
6379                         btrfs_free_path(path);
6380                         iput(inode);
6381                         return ERR_PTR(ret);
6382                 }
6383         } else if (dir) {
6384                 *index = 0;
6385         }
6386         /*
6387          * index_cnt is ignored for everything but a dir;
6388          * btrfs_set_inode_index_count has an explanation for the magic
6389          * number.
6390          */
6391         BTRFS_I(inode)->index_cnt = 2;
6392         BTRFS_I(inode)->dir_index = *index;
6393         BTRFS_I(inode)->root = root;
6394         BTRFS_I(inode)->generation = trans->transid;
6395         inode->i_generation = BTRFS_I(inode)->generation;
6396
6397         /*
6398          * We could have gotten an inode number from somebody who was fsynced
6399          * and then removed in this same transaction, so let's just set full
6400          * sync since it will be a full sync anyway and this will blow away the
6401          * old info in the log.
6402          */
6403         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6404
6405         key[0].objectid = objectid;
6406         key[0].type = BTRFS_INODE_ITEM_KEY;
6407         key[0].offset = 0;
6408
6409         sizes[0] = sizeof(struct btrfs_inode_item);
6410
6411         if (name) {
6412                 /*
6413                  * Start new inodes with an inode_ref. This is slightly more
6414                  * efficient for small numbers of hard links since they will
6415                  * be packed into one item. Extended refs will kick in if we
6416                  * add more hard links than can fit in the ref item.
6417                  */
6418                 key[1].objectid = objectid;
6419                 key[1].type = BTRFS_INODE_REF_KEY;
6420                 key[1].offset = ref_objectid;
6421
6422                 sizes[1] = name_len + sizeof(*ref);
6423         }
6424
6425         location = &BTRFS_I(inode)->location;
6426         location->objectid = objectid;
6427         location->offset = 0;
6428         location->type = BTRFS_INODE_ITEM_KEY;
6429
6430         ret = btrfs_insert_inode_locked(inode);
6431         if (ret < 0) {
6432                 iput(inode);
6433                 goto fail;
6434         }
6435
6436         path->leave_spinning = 1;
6437         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6438         if (ret != 0)
6439                 goto fail_unlock;
6440
6441         inode_init_owner(inode, dir, mode);
6442         inode_set_bytes(inode, 0);
6443
6444         inode->i_mtime = current_time(inode);
6445         inode->i_atime = inode->i_mtime;
6446         inode->i_ctime = inode->i_mtime;
6447         BTRFS_I(inode)->i_otime = inode->i_mtime;
6448
6449         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6450                                   struct btrfs_inode_item);
6451         memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6452                              sizeof(*inode_item));
6453         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6454
6455         if (name) {
6456                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6457                                      struct btrfs_inode_ref);
6458                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6459                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6460                 ptr = (unsigned long)(ref + 1);
6461                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6462         }
6463
6464         btrfs_mark_buffer_dirty(path->nodes[0]);
6465         btrfs_free_path(path);
6466
6467         btrfs_inherit_iflags(inode, dir);
6468
6469         if (S_ISREG(mode)) {
6470                 if (btrfs_test_opt(fs_info, NODATASUM))
6471                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6472                 if (btrfs_test_opt(fs_info, NODATACOW))
6473                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6474                                 BTRFS_INODE_NODATASUM;
6475         }
6476
6477         inode_tree_add(inode);
6478
6479         trace_btrfs_inode_new(inode);
6480         btrfs_set_inode_last_trans(trans, inode);
6481
6482         btrfs_update_root_times(trans, root);
6483
6484         ret = btrfs_inode_inherit_props(trans, inode, dir);
6485         if (ret)
6486                 btrfs_err(fs_info,
6487                           "error inheriting props for ino %llu (root %llu): %d",
6488                         btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);
6489
6490         return inode;
6491
6492 fail_unlock:
6493         discard_new_inode(inode);
6494 fail:
6495         if (dir && name)
6496                 BTRFS_I(dir)->index_cnt--;
6497         btrfs_free_path(path);
6498         return ERR_PTR(ret);
6499 }
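
/*
 * The single btrfs_insert_empty_items() call above creates both items that
 * describe a named inode.  For example, creating "foo" as inode 258 inside
 * directory 257 inserts:
 *
 *   (258, BTRFS_INODE_ITEM_KEY, 0)   sized sizeof(struct btrfs_inode_item)
 *   (258, BTRFS_INODE_REF_KEY, 257)  sized sizeof(struct btrfs_inode_ref) + 3
 *
 * with the name bytes "foo" written right after the ref structure.
 */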
6500
6501 /*
6502  * Utility function to add 'inode' into 'parent_inode' with
6503  * a given name and a given sequence number.
6504  * If 'add_backref' is true, also insert a backref from the
6505  * inode to the parent directory.
6506  */
6507 int btrfs_add_link(struct btrfs_trans_handle *trans,
6508                    struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6509                    const char *name, int name_len, int add_backref, u64 index)
6510 {
6511         int ret = 0;
6512         struct btrfs_key key;
6513         struct btrfs_root *root = parent_inode->root;
6514         u64 ino = btrfs_ino(inode);
6515         u64 parent_ino = btrfs_ino(parent_inode);
6516
6517         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6518                 memcpy(&key, &inode->root->root_key, sizeof(key));
6519         } else {
6520                 key.objectid = ino;
6521                 key.type = BTRFS_INODE_ITEM_KEY;
6522                 key.offset = 0;
6523         }
6524
6525         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6526                 ret = btrfs_add_root_ref(trans, key.objectid,
6527                                          root->root_key.objectid, parent_ino,
6528                                          index, name, name_len);
6529         } else if (add_backref) {
6530                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6531                                              parent_ino, index);
6532         }
6533
6534         /* Nothing to clean up yet */
6535         if (ret)
6536                 return ret;
6537
6538         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6539                                     parent_inode, &key,
6540                                     btrfs_inode_type(&inode->vfs_inode), index);
6541         if (ret == -EEXIST || ret == -EOVERFLOW)
6542                 goto fail_dir_item;
6543         else if (ret) {
6544                 btrfs_abort_transaction(trans, ret);
6545                 return ret;
6546         }
6547
6548         btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6549                            name_len * 2);
6550         inode_inc_iversion(&parent_inode->vfs_inode);
6551         /*
6552          * If we are replaying a log tree, we do not want to update the mtime
6553          * and ctime of the parent directory with the current time, since the
6554          * log replay procedure is responsible for setting them to their correct
6555          * values (the ones it had when the fsync was done).
6556          */
6557         if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
6558                 struct timespec64 now = current_time(&parent_inode->vfs_inode);
6559
6560                 parent_inode->vfs_inode.i_mtime = now;
6561                 parent_inode->vfs_inode.i_ctime = now;
6562         }
6563         ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
6564         if (ret)
6565                 btrfs_abort_transaction(trans, ret);
6566         return ret;
6567
6568 fail_dir_item:
6569         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6570                 u64 local_index;
6571                 int err;
6572                 err = btrfs_del_root_ref(trans, key.objectid,
6573                                          root->root_key.objectid, parent_ino,
6574                                          &local_index, name, name_len);
6575                 if (err)
6576                         btrfs_abort_transaction(trans, err);
6577         } else if (add_backref) {
6578                 u64 local_index;
6579                 int err;
6580
6581                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6582                                           ino, parent_ino, &local_index);
6583                 if (err)
6584                         btrfs_abort_transaction(trans, err);
6585         }
6586
6587         /* Return the original error code */
6588         return ret;
6589 }
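
/*
 * Note on the i_size update above: a btrfs directory's i_size is the sum
 * of the name lengths of its entries counted twice, once for the DIR_ITEM
 * and once for the DIR_INDEX item - hence the "name_len * 2".
 */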
6590
6591 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6592                             struct btrfs_inode *dir, struct dentry *dentry,
6593                             struct btrfs_inode *inode, int backref, u64 index)
6594 {
6595         int err = btrfs_add_link(trans, dir, inode,
6596                                  dentry->d_name.name, dentry->d_name.len,
6597                                  backref, index);
6598         if (err > 0)
6599                 err = -EEXIST;
6600         return err;
6601 }
6602
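/*
 * Create a special file (device node, fifo or socket): reserve an
 * objectid, create the new inode, initialize it as a special inode and
 * link it into the parent directory.
 */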
6603 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6604                         umode_t mode, dev_t rdev)
6605 {
6606         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6607         struct btrfs_trans_handle *trans;
6608         struct btrfs_root *root = BTRFS_I(dir)->root;
6609         struct inode *inode = NULL;
6610         int err;
6611         u64 objectid;
6612         u64 index = 0;
6613
6614         /*
6615          * 2 for inode item and ref
6616          * 2 for dir items
6617          * 1 for xattr if selinux is on
6618          */
6619         trans = btrfs_start_transaction(root, 5);
6620         if (IS_ERR(trans))
6621                 return PTR_ERR(trans);
6622
6623         err = btrfs_find_free_ino(root, &objectid);
6624         if (err)
6625                 goto out_unlock;
6626
6627         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6628                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6629                         mode, &index);
6630         if (IS_ERR(inode)) {
6631                 err = PTR_ERR(inode);
6632                 inode = NULL;
6633                 goto out_unlock;
6634         }
6635
6636         /*
6637          * If the active LSM wants to access the inode during
6638          * d_instantiate it needs these. Smack checks to see
6639          * if the filesystem supports xattrs by looking at the
6640          * ops vector.
6641          */
6642         inode->i_op = &btrfs_special_inode_operations;
6643         init_special_inode(inode, inode->i_mode, rdev);
6644
6645         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6646         if (err)
6647                 goto out_unlock;
6648
6649         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6650                         0, index);
6651         if (err)
6652                 goto out_unlock;
6653
6654         btrfs_update_inode(trans, root, inode);
6655         d_instantiate_new(dentry, inode);
6656
6657 out_unlock:
6658         btrfs_end_transaction(trans);
6659         btrfs_btree_balance_dirty(fs_info);
6660         if (err && inode) {
6661                 inode_dec_link_count(inode);
6662                 discard_new_inode(inode);
6663         }
6664         return err;
6665 }
6666
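/*
 * Create a regular file. Same flow as btrfs_mknod(), except the inode is
 * wired up with the regular file and address space operations before it
 * is instantiated.
 */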
6667 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6668                         umode_t mode, bool excl)
6669 {
6670         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6671         struct btrfs_trans_handle *trans;
6672         struct btrfs_root *root = BTRFS_I(dir)->root;
6673         struct inode *inode = NULL;
6674         int err;
6675         u64 objectid;
6676         u64 index = 0;
6677
6678         /*
6679          * 2 for inode item and ref
6680          * 2 for dir items
6681          * 1 for xattr if selinux is on
6682          */
6683         trans = btrfs_start_transaction(root, 5);
6684         if (IS_ERR(trans))
6685                 return PTR_ERR(trans);
6686
6687         err = btrfs_find_free_ino(root, &objectid);
6688         if (err)
6689                 goto out_unlock;
6690
6691         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6692                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6693                         mode, &index);
6694         if (IS_ERR(inode)) {
6695                 err = PTR_ERR(inode);
6696                 inode = NULL;
6697                 goto out_unlock;
6698         }
6699         /*
6700          * If the active LSM wants to access the inode during
6701          * d_instantiate it needs these. Smack checks to see
6702          * if the filesystem supports xattrs by looking at the
6703          * ops vector.
6704          */
6705         inode->i_fop = &btrfs_file_operations;
6706         inode->i_op = &btrfs_file_inode_operations;
6707         inode->i_mapping->a_ops = &btrfs_aops;
6708
6709         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6710         if (err)
6711                 goto out_unlock;
6712
6713         err = btrfs_update_inode(trans, root, inode);
6714         if (err)
6715                 goto out_unlock;
6716
6717         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6718                         0, index);
6719         if (err)
6720                 goto out_unlock;
6721
6722         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6723         d_instantiate_new(dentry, inode);
6724
6725 out_unlock:
6726         btrfs_end_transaction(trans);
6727         if (err && inode) {
6728                 inode_dec_link_count(inode);
6729                 discard_new_inode(inode);
6730         }
6731         btrfs_btree_balance_dirty(fs_info);
6732         return err;
6733 }
6734
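/*
 * Add a hard link: bump the inode's link count and insert one more name
 * for it in the target directory. If the inode was created with
 * O_TMPFILE (link count going from 0 to 1), its orphan item is removed.
 */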
6735 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6736                       struct dentry *dentry)
6737 {
6738         struct btrfs_trans_handle *trans = NULL;
6739         struct btrfs_root *root = BTRFS_I(dir)->root;
6740         struct inode *inode = d_inode(old_dentry);
6741         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6742         u64 index;
6743         int err;
6744         int drop_inode = 0;
6745
6746         /* do not allow hard links across subvolumes of the same device */
6747         if (root->objectid != BTRFS_I(inode)->root->objectid)
6748                 return -EXDEV;
6749
6750         if (inode->i_nlink >= BTRFS_LINK_MAX)
6751                 return -EMLINK;
6752
6753         err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6754         if (err)
6755                 goto fail;
6756
6757         /*
6758          * 2 items for inode and inode ref
6759          * 2 items for dir items
6760          * 1 item for parent inode
6761          * 1 item for orphan item deletion if O_TMPFILE
6762          */
6763         trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6764         if (IS_ERR(trans)) {
6765                 err = PTR_ERR(trans);
6766                 trans = NULL;
6767                 goto fail;
6768         }
6769
6770         /* There are several dir indexes for this inode, clear the cache. */
6771         BTRFS_I(inode)->dir_index = 0ULL;
6772         inc_nlink(inode);
6773         inode_inc_iversion(inode);
6774         inode->i_ctime = current_time(inode);
6775         ihold(inode);
6776         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6777
6778         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6779                         1, index);
6780
6781         if (err) {
6782                 drop_inode = 1;
6783         } else {
6784                 struct dentry *parent = dentry->d_parent;
6785                 int ret;
6786
6787                 err = btrfs_update_inode(trans, root, inode);
6788                 if (err)
6789                         goto fail;
6790                 if (inode->i_nlink == 1) {
6791                         /*
6792                          * If the new hard link count is 1, it's a file created
6793                          * with open(2) O_TMPFILE flag.
6794                          */
6795                         err = btrfs_orphan_del(trans, BTRFS_I(inode));
6796                         if (err)
6797                                 goto fail;
6798                 }
6799                 BTRFS_I(inode)->last_link_trans = trans->transid;
6800                 d_instantiate(dentry, inode);
6801                 ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
6802                                          true, NULL);
6803                 if (ret == BTRFS_NEED_TRANS_COMMIT) {
6804                         err = btrfs_commit_transaction(trans);
6805                         trans = NULL;
6806                 }
6807         }
6808
6809 fail:
6810         if (trans)
6811                 btrfs_end_transaction(trans);
6812         if (drop_inode) {
6813                 inode_dec_link_count(inode);
6814                 iput(inode);
6815         }
6816         btrfs_btree_balance_dirty(fs_info);
6817         return err;
6818 }
6819
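/*
 * Create a directory: reserve an objectid, create the new inode with the
 * directory operations and an empty i_size, then link it into the parent.
 */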
6820 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6821 {
6822         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6823         struct inode *inode = NULL;
6824         struct btrfs_trans_handle *trans;
6825         struct btrfs_root *root = BTRFS_I(dir)->root;
6826         int err = 0;
6827         int drop_on_err = 0;
6828         u64 objectid = 0;
6829         u64 index = 0;
6830
6831         /*
6832          * 2 items for inode and ref
6833          * 2 items for dir items
6834          * 1 for xattr if selinux is on
6835          */
6836         trans = btrfs_start_transaction(root, 5);
6837         if (IS_ERR(trans))
6838                 return PTR_ERR(trans);
6839
6840         err = btrfs_find_free_ino(root, &objectid);
6841         if (err)
6842                 goto out_fail;
6843
6844         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6845                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6846                         S_IFDIR | mode, &index);
6847         if (IS_ERR(inode)) {
6848                 err = PTR_ERR(inode);
6849                 inode = NULL;
6850                 goto out_fail;
6851         }
6852
6853         drop_on_err = 1;
6854         /* these must be set before we unlock the inode */
6855         inode->i_op = &btrfs_dir_inode_operations;
6856         inode->i_fop = &btrfs_dir_file_operations;
6857
6858         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6859         if (err)
6860                 goto out_fail;
6861
6862         btrfs_i_size_write(BTRFS_I(inode), 0);
6863         err = btrfs_update_inode(trans, root, inode);
6864         if (err)
6865                 goto out_fail;
6866
6867         err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6868                         dentry->d_name.name,
6869                         dentry->d_name.len, 0, index);
6870         if (err)
6871                 goto out_fail;
6872
6873         d_instantiate_new(dentry, inode);
6874         drop_on_err = 0;
6875
6876 out_fail:
6877         btrfs_end_transaction(trans);
6878         if (err && inode) {
6879                 inode_dec_link_count(inode);
6880                 discard_new_inode(inode);
6881         }
6882         btrfs_btree_balance_dirty(fs_info);
6883         return err;
6884 }
6885
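/*
 * Read a compressed inline extent from the leaf at path->slots[0] and
 * decompress it into @page, zero filling whatever is left between the
 * end of the decompressed data and the end of the page.
 */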
6886 static noinline int uncompress_inline(struct btrfs_path *path,
6887                                       struct page *page,
6888                                       size_t pg_offset, u64 extent_offset,
6889                                       struct btrfs_file_extent_item *item)
6890 {
6891         int ret;
6892         struct extent_buffer *leaf = path->nodes[0];
6893         char *tmp;
6894         size_t max_size;
6895         unsigned long inline_size;
6896         unsigned long ptr;
6897         int compress_type;
6898
6899         WARN_ON(pg_offset != 0);
6900         compress_type = btrfs_file_extent_compression(leaf, item);
6901         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6902         inline_size = btrfs_file_extent_inline_item_len(leaf,
6903                                         btrfs_item_nr(path->slots[0]));
6904         tmp = kmalloc(inline_size, GFP_NOFS);
6905         if (!tmp)
6906                 return -ENOMEM;
6907         ptr = btrfs_file_extent_inline_start(item);
6908
6909         read_extent_buffer(leaf, tmp, ptr, inline_size);
6910
6911         max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6912         ret = btrfs_decompress(compress_type, tmp, page,
6913                                extent_offset, inline_size, max_size);
6914
6915         /*
6916          * The decompression code contains a memset to fill in any space between the end
6917          * of the uncompressed data and the end of max_size in case the decompressed
6918          * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6919          * the end of an inline extent and the beginning of the next block, so we
6920          * cover that region here.
6921          */
6922
6923         if (max_size + pg_offset < PAGE_SIZE) {
6924                 char *map = kmap(page);
6925                 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
6926                 kunmap(page);
6927         }
6928         kfree(tmp);
6929         return ret;
6930 }
6931
6932 /*
6933  * A bit scary, this does extent mapping from logical file offset to the disk.
6934  * The ugly parts come from merging extents from the disk with the in-ram
6935  * representation.  This gets more complex because of the data=ordered code,
6936  * where the in-ram extents might be locked pending data=ordered completion.
6937  *
6938  * This also copies inline extents directly into the page.
6939  */
6940 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6941                 struct page *page,
6942                 size_t pg_offset, u64 start, u64 len,
6943                 int create)
6944 {
6945         struct btrfs_fs_info *fs_info = inode->root->fs_info;
6946         int ret;
6947         int err = 0;
6948         u64 extent_start = 0;
6949         u64 extent_end = 0;
6950         u64 objectid = btrfs_ino(inode);
6951         u32 found_type;
6952         struct btrfs_path *path = NULL;
6953         struct btrfs_root *root = inode->root;
6954         struct btrfs_file_extent_item *item;
6955         struct extent_buffer *leaf;
6956         struct btrfs_key found_key;
6957         struct extent_map *em = NULL;
6958         struct extent_map_tree *em_tree = &inode->extent_tree;
6959         struct extent_io_tree *io_tree = &inode->io_tree;
6960         const bool new_inline = !page || create;
6961
6962         read_lock(&em_tree->lock);
6963         em = lookup_extent_mapping(em_tree, start, len);
6964         if (em)
6965                 em->bdev = fs_info->fs_devices->latest_bdev;
6966         read_unlock(&em_tree->lock);
6967
6968         if (em) {
6969                 if (em->start > start || em->start + em->len <= start)
6970                         free_extent_map(em);
6971                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6972                         free_extent_map(em);
6973                 else
6974                         goto out;
6975         }
6976         em = alloc_extent_map();
6977         if (!em) {
6978                 err = -ENOMEM;
6979                 goto out;
6980         }
6981         em->bdev = fs_info->fs_devices->latest_bdev;
6982         em->start = EXTENT_MAP_HOLE;
6983         em->orig_start = EXTENT_MAP_HOLE;
6984         em->len = (u64)-1;
6985         em->block_len = (u64)-1;
6986
6987         if (!path) {
6988                 path = btrfs_alloc_path();
6989                 if (!path) {
6990                         err = -ENOMEM;
6991                         goto out;
6992                 }
6993                 /*
6994                  * Chances are we'll be called again, so go ahead and do
6995                  * readahead
6996                  */
6997                 path->reada = READA_FORWARD;
6998         }
6999
7000         ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
7001         if (ret < 0) {
7002                 err = ret;
7003                 goto out;
7004         }
7005
7006         if (ret != 0) {
7007                 if (path->slots[0] == 0)
7008                         goto not_found;
7009                 path->slots[0]--;
7010         }
7011
7012         leaf = path->nodes[0];
7013         item = btrfs_item_ptr(leaf, path->slots[0],
7014                               struct btrfs_file_extent_item);
7015         /* are we inside the extent that was found? */
7016         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7017         found_type = found_key.type;
7018         if (found_key.objectid != objectid ||
7019             found_type != BTRFS_EXTENT_DATA_KEY) {
7020                 /*
7021                  * If we backed up past the first extent we want to move forward
7022                  * and see if there is an extent in front of us, otherwise we'll
7023                  * say there is a hole for our whole search range which can
7024                  * cause problems.
7025                  */
7026                 extent_end = start;
7027                 goto next;
7028         }
7029
7030         found_type = btrfs_file_extent_type(leaf, item);
7031         extent_start = found_key.offset;
7032         if (found_type == BTRFS_FILE_EXTENT_REG ||
7033             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7034                 /* Only a regular file could have regular/prealloc extents */
7035                 if (!S_ISREG(inode->vfs_inode.i_mode)) {
7036                         err = -EUCLEAN;
7037                         btrfs_crit(fs_info,
7038                 "regular/prealloc extent found for non-regular inode %llu",
7039                                    btrfs_ino(inode));
7040                         goto out;
7041                 }
7042                 extent_end = extent_start +
7043                        btrfs_file_extent_num_bytes(leaf, item);
7044
7045                 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
7046                                                        extent_start);
7047         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
7048                 size_t size;
7049
7050                 size = btrfs_file_extent_ram_bytes(leaf, item);
7051                 extent_end = ALIGN(extent_start + size,
7052                                    fs_info->sectorsize);
7053
7054                 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
7055                                                       path->slots[0],
7056                                                       extent_start);
7057         }
7058 next:
7059         if (start >= extent_end) {
7060                 path->slots[0]++;
7061                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
7062                         ret = btrfs_next_leaf(root, path);
7063                         if (ret < 0) {
7064                                 err = ret;
7065                                 goto out;
7066                         }
7067                         if (ret > 0)
7068                                 goto not_found;
7069                         leaf = path->nodes[0];
7070                 }
7071                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7072                 if (found_key.objectid != objectid ||
7073                     found_key.type != BTRFS_EXTENT_DATA_KEY)
7074                         goto not_found;
7075                 if (start + len <= found_key.offset)
7076                         goto not_found;
7077                 if (start > found_key.offset)
7078                         goto next;
7079                 em->start = start;
7080                 em->orig_start = start;
7081                 em->len = found_key.offset - start;
7082                 goto not_found_em;
7083         }
7084
7085         btrfs_extent_item_to_extent_map(inode, path, item,
7086                         new_inline, em);
7087
7088         if (found_type == BTRFS_FILE_EXTENT_REG ||
7089             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7090                 goto insert;
7091         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
7092                 unsigned long ptr;
7093                 char *map;
7094                 size_t size;
7095                 size_t extent_offset;
7096                 size_t copy_size;
7097
7098                 if (new_inline)
7099                         goto out;
7100
7101                 size = btrfs_file_extent_ram_bytes(leaf, item);
7102                 extent_offset = page_offset(page) + pg_offset - extent_start;
7103                 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
7104                                   size - extent_offset);
7105                 em->start = extent_start + extent_offset;
7106                 em->len = ALIGN(copy_size, fs_info->sectorsize);
7107                 em->orig_block_len = em->len;
7108                 em->orig_start = em->start;
7109                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
7110                 if (!PageUptodate(page)) {
7111                         if (btrfs_file_extent_compression(leaf, item) !=
7112                             BTRFS_COMPRESS_NONE) {
7113                                 ret = uncompress_inline(path, page, pg_offset,
7114                                                         extent_offset, item);
7115                                 if (ret) {
7116                                         err = ret;
7117                                         goto out;
7118                                 }
7119                         } else {
7120                                 map = kmap(page);
7121                                 read_extent_buffer(leaf, map + pg_offset, ptr,
7122                                                    copy_size);
7123                                 if (pg_offset + copy_size < PAGE_SIZE) {
7124                                         memset(map + pg_offset + copy_size, 0,
7125                                                PAGE_SIZE - pg_offset -
7126                                                copy_size);
7127                                 }
7128                                 kunmap(page);
7129                         }
7130                         flush_dcache_page(page);
7131                 }
7132                 set_extent_uptodate(io_tree, em->start,
7133                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
7134                 goto insert;
7135         }
7136 not_found:
7137         em->start = start;
7138         em->orig_start = start;
7139         em->len = len;
7140 not_found_em:
7141         em->block_start = EXTENT_MAP_HOLE;
7142 insert:
7143         btrfs_release_path(path);
7144         if (em->start > start || extent_map_end(em) <= start) {
7145                 btrfs_err(fs_info,
7146                           "bad extent! em: [%llu %llu] passed [%llu %llu]",
7147                           em->start, em->len, start, len);
7148                 err = -EIO;
7149                 goto out;
7150         }
7151
7152         err = 0;
7153         write_lock(&em_tree->lock);
7154         err = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
7155         write_unlock(&em_tree->lock);
7156 out:
7157
7158         trace_btrfs_get_extent(root, inode, em);
7159
7160         btrfs_free_path(path);
7161         if (err) {
7162                 free_extent_map(em);
7163                 return ERR_PTR(err);
7164         }
7165         BUG_ON(!em); /* Error is always set */
7166         return em;
7167 }
7168
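/*
 * Like btrfs_get_extent(), but when the found mapping is a hole or a
 * prealloc extent, also look for delalloc bytes in that range and report
 * them with an EXTENT_MAP_DELALLOC mapping, so fiemap can account for
 * dirty data that was not yet written out.
 */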
7169 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
7170                 struct page *page,
7171                 size_t pg_offset, u64 start, u64 len,
7172                 int create)
7173 {
7174         struct extent_map *em;
7175         struct extent_map *hole_em = NULL;
7176         u64 range_start = start;
7177         u64 end;
7178         u64 found;
7179         u64 found_end;
7180         int err = 0;
7181
7182         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7183         if (IS_ERR(em))
7184                 return em;
7185         /*
7186          * If our em maps to:
7187          * - a hole or
7188          * - a pre-alloc extent,
7189          * there might actually be delalloc bytes behind it.
7190          */
7191         if (em->block_start != EXTENT_MAP_HOLE &&
7192             !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7193                 return em;
7194         else
7195                 hole_em = em;
7196
7197         /* check to see if we've wrapped (len == -1 or similar) */
7198         end = start + len;
7199         if (end < start)
7200                 end = (u64)-1;
7201         else
7202                 end -= 1;
7203
7204         em = NULL;
7205
7206         /* OK, we didn't find anything, let's look for delalloc */
7207         found = count_range_bits(&inode->io_tree, &range_start,
7208                                  end, len, EXTENT_DELALLOC, 1);
7209         found_end = range_start + found;
7210         if (found_end < range_start)
7211                 found_end = (u64)-1;
7212
7213         /*
7214          * We didn't find anything useful, return
7215          * the original results from get_extent().
7216          */
7217         if (range_start > end || found_end <= start) {
7218                 em = hole_em;
7219                 hole_em = NULL;
7220                 goto out;
7221         }
7222
7223         /* Adjust the range_start to make sure it doesn't go
7224          * backwards from the start they passed in.
7225          */
7226         range_start = max(start, range_start);
7227         found = found_end - range_start;
7228
7229         if (found > 0) {
7230                 u64 hole_start = start;
7231                 u64 hole_len = len;
7232
7233                 em = alloc_extent_map();
7234                 if (!em) {
7235                         err = -ENOMEM;
7236                         goto out;
7237                 }
7238                 /*
7239                  * When btrfs_get_extent can't find anything it
7240                  * returns one huge hole.
7241                  *
7242                  * Make sure what it found really fits our range, and
7243                  * adjust to make sure it is based on the start from
7244                  * the caller.
7245                  */
7246                 if (hole_em) {
7247                         u64 calc_end = extent_map_end(hole_em);
7248
7249                         if (calc_end <= start || (hole_em->start > end)) {
7250                                 free_extent_map(hole_em);
7251                                 hole_em = NULL;
7252                         } else {
7253                                 hole_start = max(hole_em->start, start);
7254                                 hole_len = calc_end - hole_start;
7255                         }
7256                 }
7257                 em->bdev = NULL;
7258                 if (hole_em && range_start > hole_start) {
7259                         /* Our hole starts before our delalloc, so we
7260                          * have to return just the parts of the hole
7261                          * that go until the delalloc starts.
7262                          */
7263                         em->len = min(hole_len,
7264                                       range_start - hole_start);
7265                         em->start = hole_start;
7266                         em->orig_start = hole_start;
7267                         /*
7268                          * don't adjust block start at all,
7269                          * it is fixed at EXTENT_MAP_HOLE
7270                          */
7271                         em->block_start = hole_em->block_start;
7272                         em->block_len = hole_len;
7273                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7274                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7275                 } else {
7276                         em->start = range_start;
7277                         em->len = found;
7278                         em->orig_start = range_start;
7279                         em->block_start = EXTENT_MAP_DELALLOC;
7280                         em->block_len = found;
7281                 }
7282         } else {
7283                 return hole_em;
7284         }
7285 out:
7286
7287         free_extent_map(hole_em);
7288         if (err) {
7289                 free_extent_map(em);
7290                 return ERR_PTR(err);
7291         }
7292         return em;
7293 }
7294
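/*
 * Set up the extent map (except for NOCOW writes, which reuse an
 * existing mapping) and the ordered extent covering a direct IO write.
 * If queueing the ordered extent fails, the new em is dropped again.
 */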
7295 static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7296                                                   const u64 start,
7297                                                   const u64 len,
7298                                                   const u64 orig_start,
7299                                                   const u64 block_start,
7300                                                   const u64 block_len,
7301                                                   const u64 orig_block_len,
7302                                                   const u64 ram_bytes,
7303                                                   const int type)
7304 {
7305         struct extent_map *em = NULL;
7306         int ret;
7307
7308         if (type != BTRFS_ORDERED_NOCOW) {
7309                 em = create_io_em(inode, start, len, orig_start,
7310                                   block_start, block_len, orig_block_len,
7311                                   ram_bytes,
7312                                   BTRFS_COMPRESS_NONE, /* compress_type */
7313                                   type);
7314                 if (IS_ERR(em))
7315                         goto out;
7316         }
7317         ret = btrfs_add_ordered_extent_dio(inode, start, block_start,
7318                                            len, block_len, type);
7319         if (ret) {
7320                 if (em) {
7321                         free_extent_map(em);
7322                         btrfs_drop_extent_cache(BTRFS_I(inode), start,
7323                                                 start + len - 1, 0);
7324                 }
7325                 em = ERR_PTR(ret);
7326         }
7327  out:
7328
7329         return em;
7330 }
7331
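/*
 * Allocate a brand new data extent for a COWing direct IO write and hook
 * it up with an extent map and an ordered extent.
 */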
7332 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7333                                                   u64 start, u64 len)
7334 {
7335         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7336         struct btrfs_root *root = BTRFS_I(inode)->root;
7337         struct extent_map *em;
7338         struct btrfs_key ins;
7339         u64 alloc_hint;
7340         int ret;
7341
7342         alloc_hint = get_extent_allocation_hint(inode, start, len);
7343         ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7344                                    0, alloc_hint, &ins, 1, 1);
7345         if (ret)
7346                 return ERR_PTR(ret);
7347
7348         em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7349                                      ins.objectid, ins.offset, ins.offset,
7350                                      ins.offset, BTRFS_ORDERED_REGULAR);
7351         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7352         if (IS_ERR(em))
7353                 btrfs_free_reserved_extent(fs_info, ins.objectid,
7354                                            ins.offset, 1);
7355
7356         return em;
7357 }
7358
7359 /*
7360  * Returns 1 when the nocow is safe, < 0 on error, 0 if the
7361  * block must be cow'd.
7362  */
7363 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7364                               u64 *orig_start, u64 *orig_block_len,
7365                               u64 *ram_bytes)
7366 {
7367         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7368         struct btrfs_path *path;
7369         int ret;
7370         struct extent_buffer *leaf;
7371         struct btrfs_root *root = BTRFS_I(inode)->root;
7372         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7373         struct btrfs_file_extent_item *fi;
7374         struct btrfs_key key;
7375         u64 disk_bytenr;
7376         u64 backref_offset;
7377         u64 extent_end;
7378         u64 num_bytes;
7379         int slot;
7380         int found_type;
7381         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7382
7383         path = btrfs_alloc_path();
7384         if (!path)
7385                 return -ENOMEM;
7386
7387         ret = btrfs_lookup_file_extent(NULL, root, path,
7388                         btrfs_ino(BTRFS_I(inode)), offset, 0);
7389         if (ret < 0)
7390                 goto out;
7391
7392         slot = path->slots[0];
7393         if (ret == 1) {
7394                 if (slot == 0) {
7395                         /* can't find the item, must cow */
7396                         ret = 0;
7397                         goto out;
7398                 }
7399                 slot--;
7400         }
7401         ret = 0;
7402         leaf = path->nodes[0];
7403         btrfs_item_key_to_cpu(leaf, &key, slot);
7404         if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7405             key.type != BTRFS_EXTENT_DATA_KEY) {
7406                 /* not our file or wrong item type, must cow */
7407                 goto out;
7408         }
7409
7410         if (key.offset > offset) {
7411                 /* Wrong offset, must cow */
7412                 goto out;
7413         }
7414
7415         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7416         found_type = btrfs_file_extent_type(leaf, fi);
7417         if (found_type != BTRFS_FILE_EXTENT_REG &&
7418             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7419                 /* not a regular extent, must cow */
7420                 goto out;
7421         }
7422
7423         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7424                 goto out;
7425
7426         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7427         if (extent_end <= offset)
7428                 goto out;
7429
7430         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7431         if (disk_bytenr == 0)
7432                 goto out;
7433
7434         if (btrfs_file_extent_compression(leaf, fi) ||
7435             btrfs_file_extent_encryption(leaf, fi) ||
7436             btrfs_file_extent_other_encoding(leaf, fi))
7437                 goto out;
7438
7439         /*
7440          * Do the same check as in btrfs_cross_ref_exist but without the
7441          * unnecessary search.
7442          */
7443         if (btrfs_file_extent_generation(leaf, fi) <=
7444             btrfs_root_last_snapshot(&root->root_item))
7445                 goto out;
7446
7447         backref_offset = btrfs_file_extent_offset(leaf, fi);
7448
7449         if (orig_start) {
7450                 *orig_start = key.offset - backref_offset;
7451                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7452                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7453         }
7454
7455         if (btrfs_extent_readonly(fs_info, disk_bytenr))
7456                 goto out;
7457
7458         num_bytes = min(offset + *len, extent_end) - offset;
7459         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7460                 u64 range_end;
7461
7462                 range_end = round_up(offset + num_bytes,
7463                                      root->fs_info->sectorsize) - 1;
7464                 ret = test_range_bit(io_tree, offset, range_end,
7465                                      EXTENT_DELALLOC, 0, NULL);
7466                 if (ret) {
7467                         ret = -EAGAIN;
7468                         goto out;
7469                 }
7470         }
7471
7472         btrfs_release_path(path);
7473
7474         /*
7475          * Look for other files referencing this extent; if we
7476          * find any we must cow.
7477          */
7478
7479         ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
7480                                     key.offset - backref_offset, disk_bytenr);
7481         if (ret) {
7482                 ret = 0;
7483                 goto out;
7484         }
7485
7486         /*
7487          * Adjust disk_bytenr and num_bytes to cover just the bytes
7488          * in this extent we are about to write.  If there
7489          * are any csums in that range we have to cow in order
7490          * to keep the csums correct.
7491          */
7492         disk_bytenr += backref_offset;
7493         disk_bytenr += offset - key.offset;
7494         if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
7495                 goto out;
7496         /*
7497          * All of the above have passed, it is safe to overwrite this extent
7498          * without cow.
7499          */
7500         *len = num_bytes;
7501         ret = 1;
7502 out:
7503         btrfs_free_path(path);
7504         return ret;
7505 }
7506
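/*
 * Lock the io tree range targeted by direct IO, retrying until the range
 * has no ordered extents and, for writes, no buffered pages left.
 * Returns -ENOTBLK when waiting could deadlock, which makes the caller
 * fall back to buffered IO.
 */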
7507 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7508                               struct extent_state **cached_state, int writing)
7509 {
7510         struct btrfs_ordered_extent *ordered;
7511         int ret = 0;
7512
7513         while (1) {
7514                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7515                                  cached_state);
7516                 /*
7517                  * We're concerned with the entire range that we're going to be
7518                  * doing DIO to, so we need to make sure there's no ordered
7519                  * extents in this range.
7520                  */
7521                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7522                                                      lockend - lockstart + 1);
7523
7524                 /*
7525                  * We need to make sure there are no buffered pages in this
7526                  * range either, we could have raced between the invalidate in
7527                  * generic_file_direct_write and locking the extent.  The
7528                  * invalidate needs to happen so that reads after a write do not
7529                  * get stale data.
7530                  */
7531                 if (!ordered &&
7532                     (!writing || !filemap_range_has_page(inode->i_mapping,
7533                                                          lockstart, lockend)))
7534                         break;
7535
7536                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7537                                      cached_state);
7538
7539                 if (ordered) {
7540                         /*
7541                          * If we are doing a DIO read and the ordered extent we
7542                          * found is for a buffered write, we cannot wait for it
7543                          * to complete and retry, because if we do so we can
7544                          * deadlock with concurrent buffered writes on page
7545                          * locks. This happens only if our DIO read covers more
7546                          * than one extent map, we have at this point already
7547                          * created an ordered extent for a previous extent map
7548                          * and locked its range in the inode's io tree, and a
7549                          * concurrent write against that previous extent map's
7550                          * range and this range has started (we unlock the ranges
7551                          * in the io tree only when the bios complete and
7552                          * buffered writes always lock pages before attempting
7553                          * to lock a range in the io tree).
7554                          */
7555                         if (writing ||
7556                             test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7557                                 btrfs_start_ordered_extent(inode, ordered, 1);
7558                         else
7559                                 ret = -ENOTBLK;
7560                         btrfs_put_ordered_extent(ordered);
7561                 } else {
7562                         /*
7563                          * We could trigger writeback for this range (and wait
7564                          * for it to complete) and then invalidate the pages for
7565                          * this range (through invalidate_inode_pages2_range()),
7566                          * but that can lead us to a deadlock with a concurrent
7567                          * call to readpages() (a buffered read or a readahead
7568                          * triggered by a defrag call) on a page lock, due to an
7569                          * ordered dio extent we created before but did not yet
7570                          * have a corresponding bio submitted (hence it cannot
7571                          * complete), which makes readpages() wait for that
7572                          * ordered extent to complete while holding a lock on
7573                          * that page.
7574                          */
7575                         ret = -ENOTBLK;
7576                 }
7577
7578                 if (ret)
7579                         break;
7580
7581                 cond_resched();
7582         }
7583
7584         return ret;
7585 }
7586
7587 /* The callers of this must take lock_extent() */
7588 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
7589                                        u64 orig_start, u64 block_start,
7590                                        u64 block_len, u64 orig_block_len,
7591                                        u64 ram_bytes, int compress_type,
7592                                        int type)
7593 {
7594         struct extent_map_tree *em_tree;
7595         struct extent_map *em;
7596         struct btrfs_root *root = BTRFS_I(inode)->root;
7597         int ret;
7598
7599         ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7600                type == BTRFS_ORDERED_COMPRESSED ||
7601                type == BTRFS_ORDERED_NOCOW ||
7602                type == BTRFS_ORDERED_REGULAR);
7603
7604         em_tree = &BTRFS_I(inode)->extent_tree;
7605         em = alloc_extent_map();
7606         if (!em)
7607                 return ERR_PTR(-ENOMEM);
7608
7609         em->start = start;
7610         em->orig_start = orig_start;
7611         em->len = len;
7612         em->block_len = block_len;
7613         em->block_start = block_start;
7614         em->bdev = root->fs_info->fs_devices->latest_bdev;
7615         em->orig_block_len = orig_block_len;
7616         em->ram_bytes = ram_bytes;
7617         em->generation = -1;
7618         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7619         if (type == BTRFS_ORDERED_PREALLOC) {
7620                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7621         } else if (type == BTRFS_ORDERED_COMPRESSED) {
7622                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7623                 em->compress_type = compress_type;
7624         }
7625
7626         do {
7627                 btrfs_drop_extent_cache(BTRFS_I(inode), em->start,
7628                                 em->start + em->len - 1, 0);
7629                 write_lock(&em_tree->lock);
7630                 ret = add_extent_mapping(em_tree, em, 1);
7631                 write_unlock(&em_tree->lock);
7632                 /*
7633                  * The caller has taken lock_extent(), so nothing should race
7634                  * with us to add this em; loop just in case of -EEXIST.
7635                  */
7636         } while (ret == -EEXIST);
7637
7638         if (ret) {
7639                 free_extent_map(em);
7640                 return ERR_PTR(ret);
7641         }
7642
7643         /* The em got 2 refs now, the caller needs to do free_extent_map once. */
7644         return em;
7645 }
7647
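/*
 * Fill in @bh_result for a direct IO read from an already mapped extent.
 * Holes and prealloc extents carry no data to read, so return -ENOENT
 * for those; the caller turns that into an unmapped, zero-filled buffer.
 */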
7648 static int btrfs_get_blocks_direct_read(struct extent_map *em,
7649                                         struct buffer_head *bh_result,
7650                                         struct inode *inode,
7651                                         u64 start, u64 len)
7652 {
7653         if (em->block_start == EXTENT_MAP_HOLE ||
7654                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7655                 return -ENOENT;
7656
7657         len = min(len, em->len - (start - em->start));
7658
7659         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7660                 inode->i_blkbits;
7661         bh_result->b_size = len;
7662         bh_result->b_bdev = em->bdev;
7663         set_buffer_mapped(bh_result);
7664
7665         return 0;
7666 }
7667
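/*
 * Map @bh_result for a direct IO write: reuse the existing extent when a
 * nocow write is possible (NODATACOW inode or PREALLOC extent),
 * otherwise allocate a new extent with btrfs_new_extent_direct(), then
 * update i_size and the outstanding dio_data reservation accounting.
 */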
7668 static int btrfs_get_blocks_direct_write(struct extent_map **map,
7669                                          struct buffer_head *bh_result,
7670                                          struct inode *inode,
7671                                          struct btrfs_dio_data *dio_data,
7672                                          u64 start, u64 len)
7673 {
7674         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7675         struct extent_map *em = *map;
7676         int ret = 0;
7677
7678         /*
7679          * We don't allocate a new extent in the following cases:
7680          *
7681          * 1) The inode is marked as NODATACOW. In this case we'll just use
7682          *    the existing extent.
7683          *
7684          * 2) The extent is marked as PREALLOC. We're good to go here and
7685          *    can just use the extent.
7686          */
7687         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7688             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7689              em->block_start != EXTENT_MAP_HOLE)) {
7690                 int type;
7691                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7692
7693                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7694                         type = BTRFS_ORDERED_PREALLOC;
7695                 else
7696                         type = BTRFS_ORDERED_NOCOW;
7697                 len = min(len, em->len - (start - em->start));
7698                 block_start = em->block_start + (start - em->start);
7699
7700                 if (can_nocow_extent(inode, start, &len, &orig_start,
7701                                      &orig_block_len, &ram_bytes) == 1 &&
7702                     btrfs_inc_nocow_writers(fs_info, block_start)) {
7703                         struct extent_map *em2;
7704
7705                         em2 = btrfs_create_dio_extent(inode, start, len,
7706                                                       orig_start, block_start,
7707                                                       len, orig_block_len,
7708                                                       ram_bytes, type);
7709                         btrfs_dec_nocow_writers(fs_info, block_start);
7710                         if (type == BTRFS_ORDERED_PREALLOC) {
7711                                 free_extent_map(em);
7712                                 *map = em = em2;
7713                         }
7714
7715                         if (em2 && IS_ERR(em2)) {
7716                                 ret = PTR_ERR(em2);
7717                                 goto out;
7718                         }
7719                         /*
7720                          * For an inode marked NODATACOW or an extent marked PREALLOC,
7721                          * we use the existing or preallocated extent, so we do not
7722                          * need to adjust btrfs_space_info's bytes_may_use.
7723                          */
7724                         btrfs_free_reserved_data_space_noquota(inode, start,
7725                                                                len);
7726                         goto skip_cow;
7727                 }
7728         }
7729
7730         /* this will cow the extent */
7731         len = bh_result->b_size;
7732         free_extent_map(em);
7733         *map = em = btrfs_new_extent_direct(inode, start, len);
7734         if (IS_ERR(em)) {
7735                 ret = PTR_ERR(em);
7736                 goto out;
7737         }
7738
7739         len = min(len, em->len - (start - em->start));
7740
7741 skip_cow:
7742         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7743                 inode->i_blkbits;
7744         bh_result->b_size = len;
7745         bh_result->b_bdev = em->bdev;
7746         set_buffer_mapped(bh_result);
7747
7748         if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7749                 set_buffer_new(bh_result);
7750
7751         /*
7752          * Need to update the i_size under the extent lock so buffered
7753          * readers will get the updated i_size when we unlock.
7754          */
7755         if (!dio_data->overwrite && start + len > i_size_read(inode))
7756                 i_size_write(inode, start + len);
7757
7758         WARN_ON(dio_data->reserve < len);
7759         dio_data->reserve -= len;
7760         dio_data->unsubmitted_oe_range_end = start + len;
7761         current->journal_info = dio_data;
7762 out:
7763         return ret;
7764 }
7765
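/*
 * The get_block callback used by the generic direct IO code: lock the
 * range, look up (or, for writes, create) the extent backing it and
 * describe the result in @bh_result.
 */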
7766 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7767                                    struct buffer_head *bh_result, int create)
7768 {
7769         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7770         struct extent_map *em;
7771         struct extent_state *cached_state = NULL;
7772         struct btrfs_dio_data *dio_data = NULL;
7773         u64 start = iblock << inode->i_blkbits;
7774         u64 lockstart, lockend;
7775         u64 len = bh_result->b_size;
7776         int unlock_bits = EXTENT_LOCKED;
7777         int ret = 0;
7778
7779         if (create)
7780                 unlock_bits |= EXTENT_DIRTY;
7781         else
7782                 len = min_t(u64, len, fs_info->sectorsize);
7783
7784         lockstart = start;
7785         lockend = start + len - 1;
7786
7787         if (current->journal_info) {
7788                 /*
7789                  * Need to pull our outstanding extents and set journal_info
7790                  * to NULL so that anything that needs to check if there's a
7791                  * transaction doesn't get confused.
7792                  */
7793                 dio_data = current->journal_info;
7794                 current->journal_info = NULL;
7795         }
7796
7797         /*
7798          * If this errors out it's because we couldn't invalidate pagecache for
7799          * this range and we need to fall back to buffered.
7800          */
7801         if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7802                                create)) {
7803                 ret = -ENOTBLK;
7804                 goto err;
7805         }
7806
7807         em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
7808         if (IS_ERR(em)) {
7809                 ret = PTR_ERR(em);
7810                 goto unlock_err;
7811         }
7812
7813         /*
7814          * OK, for INLINE and COMPRESSED extents we need to fall back on buffered
7815          * io.  INLINE is special, and we could probably kludge it in here, but
7816          * it's still buffered so for safety let's just fall back to the generic
7817          * buffered path.
7818          *
7819          * For COMPRESSED we _have_ to read the entire extent in so we can
7820          * decompress it, so there will be buffering required no matter what we
7821          * do, so go ahead and fallback to buffered.
7822          *
7823          * We return -ENOTBLK because that's what makes DIO go ahead and go back
7824          * to buffered IO.  Don't blame me, this is the price we pay for using
7825          * the generic code.
7826          */
7827         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7828             em->block_start == EXTENT_MAP_INLINE) {
7829                 free_extent_map(em);
7830                 ret = -ENOTBLK;
7831                 goto unlock_err;
7832         }
7833
7834         if (create) {
7835                 ret = btrfs_get_blocks_direct_write(&em, bh_result, inode,
7836                                                     dio_data, start, len);
7837                 if (ret < 0)
7838                         goto unlock_err;
7839
7840                 /* clear and unlock the entire range */
7841                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7842                                  unlock_bits, 1, 0, &cached_state);
7843         } else {
7844                 ret = btrfs_get_blocks_direct_read(em, bh_result, inode,
7845                                                    start, len);
7846                 /* Can be negative only if we read from a hole */
7847                 if (ret < 0) {
7848                         ret = 0;
7849                         free_extent_map(em);
7850                         goto unlock_err;
7851                 }
7852                 /*
7853                  * We need to unlock only the end area that we aren't using.
7854                  * The rest is going to be unlocked by the endio routine.
7855                  */
7856                 lockstart = start + bh_result->b_size;
7857                 if (lockstart < lockend) {
7858                         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7859                                          lockend, unlock_bits, 1, 0,
7860                                          &cached_state);
7861                 } else {
7862                         free_extent_state(cached_state);
7863                 }
7864         }
7865
7866         free_extent_map(em);
7867
7868         return 0;
7869
7870 unlock_err:
7871         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7872                          unlock_bits, 1, 0, &cached_state);
7873 err:
7874         if (dio_data)
7875                 current->journal_info = dio_data;
7876         return ret;
7877 }
7878
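/*
 * Submit a repair read for a failed direct IO bio: route its completion
 * through the DIO repair end_io workqueue and send it to the requested
 * mirror.
 */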
7879 static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
7880                                                  struct bio *bio,
7881                                                  int mirror_num)
7882 {
7883         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7884         blk_status_t ret;
7885
7886         BUG_ON(bio_op(bio) == REQ_OP_WRITE);
7887
7888         ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
7889         if (ret)
7890                 return ret;
7891
7892         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
7893
7894         return ret;
7895 }
7896
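/*
 * Decide whether a failed DIO read can be retried from another copy.
 * This needs more than one copy to exist, and it advances
 * failrec->this_mirror to the next mirror, skipping the one that
 * already failed.
 */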
7897 static int btrfs_check_dio_repairable(struct inode *inode,
7898                                       struct bio *failed_bio,
7899                                       struct io_failure_record *failrec,
7900                                       int failed_mirror)
7901 {
7902         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7903         int num_copies;
7904
7905         num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
7906         if (num_copies == 1) {
7907                 /*
7908                  * We only have a single copy of the data, so don't bother with
7909                  * all the retry and error correction code that follows. No
7910                  * matter what the error is, it is very likely to persist.
7911                  */
7912                 btrfs_debug(fs_info,
7913                         "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
7914                         num_copies, failrec->this_mirror, failed_mirror);
7915                 return 0;
7916         }
7917
7918         failrec->failed_mirror = failed_mirror;
7919         failrec->this_mirror++;
7920         if (failrec->this_mirror == failed_mirror)
7921                 failrec->this_mirror++;
7922
7923         if (failrec->this_mirror > num_copies) {
7924                 btrfs_debug(fs_info,
7925                         "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
7926                         num_copies, failrec->this_mirror, failed_mirror);
7927                 return 0;
7928         }
7929
7930         return 1;
7931 }
7932
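/*
 * Kick off read repair for a failed DIO read: record the failure, check
 * that it is repairable, then build and submit a new read bio against
 * the next mirror for the affected range.
 */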
7933 static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
7934                                    struct page *page, unsigned int pgoff,
7935                                    u64 start, u64 end, int failed_mirror,
7936                                    bio_end_io_t *repair_endio, void *repair_arg)
7937 {
7938         struct io_failure_record *failrec;
7939         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7940         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
7941         struct bio *bio;
7942         int isector;
7943         unsigned int read_mode = 0;
7944         int segs;
7945         int ret;
7946         blk_status_t status;
7947         struct bio_vec bvec;
7948
7949         BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
7950
7951         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7952         if (ret)
7953                 return errno_to_blk_status(ret);
7954
7955         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7956                                          failed_mirror);
7957         if (!ret) {
7958                 free_io_failure(failure_tree, io_tree, failrec);
7959                 return BLK_STS_IOERR;
7960         }
7961
7962         segs = bio_segments(failed_bio);
7963         bio_get_first_bvec(failed_bio, &bvec);
7964         if (segs > 1 ||
7965             (bvec.bv_len > btrfs_inode_sectorsize(inode)))
7966                 read_mode |= REQ_FAILFAST_DEV;
7967
7968         isector = start - btrfs_io_bio(failed_bio)->logical;
7969         isector >>= inode->i_sb->s_blocksize_bits;
7970         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7971                                 pgoff, isector, repair_endio, repair_arg);
7972         bio->bi_opf = REQ_OP_READ | read_mode;
7973
7974         btrfs_debug(BTRFS_I(inode)->root->fs_info,
7975                     "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
7976                     read_mode, failrec->this_mirror, failrec->in_validation);
7977
7978         status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
7979         if (status) {
7980                 free_io_failure(failure_tree, io_tree, failrec);
7981                 bio_put(bio);
7982         }
7983
7984         return status;
7985 }
7986
7987 struct btrfs_retry_complete {
7988         struct completion done;
7989         struct inode *inode;
7990         u64 start;
7991         int uptodate;
7992 };
7993
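/*
 * Completion callback for repair reads on inodes without data checksums:
 * any retry that finishes without a bio error is taken as good, so the
 * matching io failure record is cleaned before the waiter is woken.
 */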
7994 static void btrfs_retry_endio_nocsum(struct bio *bio)
7995 {
7996         struct btrfs_retry_complete *done = bio->bi_private;
7997         struct inode *inode = done->inode;
7998         struct bio_vec *bvec;
7999         struct extent_io_tree *io_tree, *failure_tree;
8000         int i;
8001
8002         if (bio->bi_status)
8003                 goto end;
8004
8005         ASSERT(bio->bi_vcnt == 1);
8006         io_tree = &BTRFS_I(inode)->io_tree;
8007         failure_tree = &BTRFS_I(inode)->io_failure_tree;
8008         ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode));
8009
8010         done->uptodate = 1;
8011         ASSERT(!bio_flagged(bio, BIO_CLONED));
8012         bio_for_each_segment_all(bvec, bio, i)
8013                 clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
8014                                  io_tree, done->start, bvec->bv_page,
8015                                  btrfs_ino(BTRFS_I(inode)), 0);
8016 end:
8017         complete(&done->done);
8018         bio_put(bio);
8019 }
8020
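/*
 * Walk every sector of a failed DIO read on a nodatasum inode and keep
 * resubmitting each one to the remaining mirrors until the read succeeds
 * or dio_read_error() reports that no further mirror is available.
 */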
8021 static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
8022                                                 struct btrfs_io_bio *io_bio)
8023 {
8024         struct btrfs_fs_info *fs_info;
8025         struct bio_vec bvec;
8026         struct bvec_iter iter;
8027         struct btrfs_retry_complete done;
8028         u64 start;
8029         unsigned int pgoff;
8030         u32 sectorsize;
8031         int nr_sectors;
8032         blk_status_t ret;
8033         blk_status_t err = BLK_STS_OK;
8034
8035         fs_info = BTRFS_I(inode)->root->fs_info;
8036         sectorsize = fs_info->sectorsize;
8037
8038         start = io_bio->logical;
8039         done.inode = inode;
8040         io_bio->bio.bi_iter = io_bio->iter;
8041
8042         bio_for_each_segment(bvec, &io_bio->bio, iter) {
8043                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
8044                 pgoff = bvec.bv_offset;
8045
8046 next_block_or_try_again:
8047                 done.uptodate = 0;
8048                 done.start = start;
8049                 init_completion(&done.done);
8050
8051                 ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
8052                                 pgoff, start, start + sectorsize - 1,
8053                                 io_bio->mirror_num,
8054                                 btrfs_retry_endio_nocsum, &done);
8055                 if (ret) {
8056                         err = ret;
8057                         goto next;
8058                 }
8059
8060                 wait_for_completion_io(&done.done);
8061
8062                 if (!done.uptodate) {
8063                         /* We might have another mirror, so try again */
8064                         goto next_block_or_try_again;
8065                 }
8066
8067 next:
8068                 start += sectorsize;
8069
8070                 nr_sectors--;
8071                 if (nr_sectors) {
8072                         pgoff += sectorsize;
8073                         ASSERT(pgoff < PAGE_SIZE);
8074                         goto next_block_or_try_again;
8075                 }
8076         }
8077
8078         return err;
8079 }
8080
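/*
 * Completion callback for repair reads on checksummed inodes: re-verify
 * the checksum of each retried sector and only clean the io failure
 * record (and mark the retry uptodate) when verification passes.
 */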
8081 static void btrfs_retry_endio(struct bio *bio)
8082 {
8083         struct btrfs_retry_complete *done = bio->bi_private;
8084         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8085         struct extent_io_tree *io_tree, *failure_tree;
8086         struct inode *inode = done->inode;
8087         struct bio_vec *bvec;
8088         int uptodate;
8089         int ret;
8090         int i;
8091
8092         if (bio->bi_status)
8093                 goto end;
8094
8095         uptodate = 1;
8096
8097         ASSERT(bio->bi_vcnt == 1);
8098         ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode));
8099
8100         io_tree = &BTRFS_I(inode)->io_tree;
8101         failure_tree = &BTRFS_I(inode)->io_failure_tree;
8102
8103         ASSERT(!bio_flagged(bio, BIO_CLONED));
8104         bio_for_each_segment_all(bvec, bio, i) {
8105                 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
8106                                              bvec->bv_offset, done->start,
8107                                              bvec->bv_len);
8108                 if (!ret)
8109                         clean_io_failure(BTRFS_I(inode)->root->fs_info,
8110                                          failure_tree, io_tree, done->start,
8111                                          bvec->bv_page,
8112                                          btrfs_ino(BTRFS_I(inode)),
8113                                          bvec->bv_offset);
8114                 else
8115                         uptodate = 0;
8116         }
8117
8118         done->uptodate = uptodate;
8119 end:
8120         complete(&done->done);
8121         bio_put(bio);
8122 }
8123
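/*
 * Verify the checksum of every sector of a completed DIO read. Sectors
 * that fail verification (or that belong to a bio that errored out) are
 * resubmitted to other mirrors via dio_read_error() until they read back
 * clean or we run out of copies to try.
 */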
8124 static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
8125                 struct btrfs_io_bio *io_bio, blk_status_t err)
8126 {
8127         struct btrfs_fs_info *fs_info;
8128         struct bio_vec bvec;
8129         struct bvec_iter iter;
8130         struct btrfs_retry_complete done;
8131         u64 start;
8132         u64 offset = 0;
8133         u32 sectorsize;
8134         int nr_sectors;
8135         unsigned int pgoff;
8136         int csum_pos;
8137         bool uptodate = (err == 0);
8138         int ret;
8139         blk_status_t status;
8140
8141         fs_info = BTRFS_I(inode)->root->fs_info;
8142         sectorsize = fs_info->sectorsize;
8143
8144         err = BLK_STS_OK;
8145         start = io_bio->logical;
8146         done.inode = inode;
8147         io_bio->bio.bi_iter = io_bio->iter;
8148
8149         bio_for_each_segment(bvec, &io_bio->bio, iter) {
8150                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
8151
8152                 pgoff = bvec.bv_offset;
8153 next_block:
8154                 if (uptodate) {
8155                         csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
8156                         ret = __readpage_endio_check(inode, io_bio, csum_pos,
8157                                         bvec.bv_page, pgoff, start, sectorsize);
8158                         if (likely(!ret))
8159                                 goto next;
8160                 }
8161 try_again:
8162                 done.uptodate = 0;
8163                 done.start = start;
8164                 init_completion(&done.done);
8165
8166                 status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
8167                                         pgoff, start, start + sectorsize - 1,
8168                                         io_bio->mirror_num, btrfs_retry_endio,
8169                                         &done);
8170                 if (status) {
8171                         err = status;
8172                         goto next;
8173                 }
8174
8175                 wait_for_completion_io(&done.done);
8176
8177                 if (!done.uptodate) {
8178                         /* We might have another mirror, so try again */
8179                         goto try_again;
8180                 }
8181 next:
8182                 offset += sectorsize;
8183                 start += sectorsize;
8184
8185                 ASSERT(nr_sectors);
8186
8187                 nr_sectors--;
8188                 if (nr_sectors) {
8189                         pgoff += sectorsize;
8190                         ASSERT(pgoff < PAGE_SIZE);
8191                         goto next_block;
8192                 }
8193         }
8194
8195         return err;
8196 }
8197
8198 static blk_status_t btrfs_subio_endio_read(struct inode *inode,
8199                 struct btrfs_io_bio *io_bio, blk_status_t err)
8200 {
8201         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8202
8203         if (skip_csum) {
8204                 if (unlikely(err))
8205                         return __btrfs_correct_data_nocsum(inode, io_bio);
8206                 else
8207                         return BLK_STS_OK;
8208         } else {
8209                 return __btrfs_subio_endio_read(inode, io_bio, err);
8210         }
8211 }
8212
8213 static void btrfs_endio_direct_read(struct bio *bio)
8214 {
8215         struct btrfs_dio_private *dip = bio->bi_private;
8216         struct inode *inode = dip->inode;
8217         struct bio *dio_bio;
8218         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8219         blk_status_t err = bio->bi_status;
8220
8221         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
8222                 err = btrfs_subio_endio_read(inode, io_bio, err);
8223
8224         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
8225                       dip->logical_offset + dip->bytes - 1);
8226         dio_bio = dip->dio_bio;
8227
8228         kfree(dip);
8229
8230         dio_bio->bi_status = err;
8231         dio_end_io(dio_bio);
8232
8233         if (io_bio->end_io)
8234                 io_bio->end_io(io_bio, blk_status_to_errno(err));
8235         bio_put(bio);
8236 }
8237
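/*
 * Mark the ordered extents covering [offset, offset + bytes) as complete
 * and queue finish_ordered_fn work for each of them. A single DIO write
 * may span several ordered extents, so loop until the whole range has
 * been accounted for.
 */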
8238 static void __endio_write_update_ordered(struct inode *inode,
8239                                          const u64 offset, const u64 bytes,
8240                                          const bool uptodate)
8241 {
8242         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8243         struct btrfs_ordered_extent *ordered = NULL;
8244         struct btrfs_workqueue *wq;
8245         btrfs_work_func_t func;
8246         u64 ordered_offset = offset;
8247         u64 ordered_bytes = bytes;
8248         u64 last_offset;
8249
8250         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
8251                 wq = fs_info->endio_freespace_worker;
8252                 func = btrfs_freespace_write_helper;
8253         } else {
8254                 wq = fs_info->endio_write_workers;
8255                 func = btrfs_endio_write_helper;
8256         }
8257
8258         while (ordered_offset < offset + bytes) {
8259                 last_offset = ordered_offset;
8260                 if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
8261                                                            &ordered_offset,
8262                                                            ordered_bytes,
8263                                                            uptodate)) {
8264                         btrfs_init_work(&ordered->work, func,
8265                                         finish_ordered_fn,
8266                                         NULL, NULL);
8267                         btrfs_queue_work(wq, &ordered->work);
8268                 }
8269                 /*
8270                  * If btrfs_dec_test_first_ordered_pending() does not find any
8271                  * ordered extent in the range, we can exit.
8272                  */
8273                 if (ordered_offset == last_offset)
8274                         return;
8275                 /*
8276                  * Our bio might span multiple ordered extents. In this case
8277                  * we keep going until we have accounted for the whole dio.
8278                  */
8279                 if (ordered_offset < offset + bytes) {
8280                         ordered_bytes = offset + bytes - ordered_offset;
8281                         ordered = NULL;
8282                 }
8283         }
8284 }
8285
8286 static void btrfs_endio_direct_write(struct bio *bio)
8287 {
8288         struct btrfs_dio_private *dip = bio->bi_private;
8289         struct bio *dio_bio = dip->dio_bio;
8290
8291         __endio_write_update_ordered(dip->inode, dip->logical_offset,
8292                                      dip->bytes, !bio->bi_status);
8293
8294         kfree(dip);
8295
8296         dio_bio->bi_status = bio->bi_status;
8297         dio_end_io(dio_bio);
8298         bio_put(bio);
8299 }
8300
8301 static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data,
8302                                     struct bio *bio, u64 offset)
8303 {
8304         struct inode *inode = private_data;
8305         blk_status_t ret;
8306         ret = btrfs_csum_one_bio(inode, bio, offset, 1);
8307         BUG_ON(ret); /* -ENOMEM */
8308         return 0;
8309 }
8310
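/*
 * Per-fragment completion handler for the bios cloned off a DIO request.
 * Records any error in the dip and, once the last pending fragment has
 * finished, completes the original bio with the accumulated status.
 */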
8311 static void btrfs_end_dio_bio(struct bio *bio)
8312 {
8313         struct btrfs_dio_private *dip = bio->bi_private;
8314         blk_status_t err = bio->bi_status;
8315
8316         if (err)
8317                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8318                            "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
8319                            btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
8320                            bio->bi_opf,
8321                            (unsigned long long)bio->bi_iter.bi_sector,
8322                            bio->bi_iter.bi_size, err);
8323
8324         if (dip->subio_endio)
8325                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8326
8327         if (err) {
8328                 /*
8329                  * We want the errors flag to be seen as set before we
8330                  * decrement the reference count. We don't need a barrier
8331                  * since atomic operations with a return value are fully
8332                  * ordered as per atomic_t.txt.
8333                  */
8334                 dip->errors = 1;
8335         }
8336
8337         /* if there are more bios still pending for this dio, just exit */
8338         if (!atomic_dec_and_test(&dip->pending_bios))
8339                 goto out;
8340
8341         if (dip->errors) {
8342                 bio_io_error(dip->orig_bio);
8343         } else {
8344                 dip->dio_bio->bi_status = BLK_STS_OK;
8345                 bio_endio(dip->orig_bio);
8346         }
8347 out:
8348         bio_put(bio);
8349 }
8350
8351 static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
8352                                                  struct btrfs_dio_private *dip,
8353                                                  struct bio *bio,
8354                                                  u64 file_offset)
8355 {
8356         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8357         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8358         blk_status_t ret;
8359
8360         /*
8361          * We load all the csum data we need when we submit
8362          * the first bio to reduce the csum tree search and
8363          * contention.
8364          */
8365         if (dip->logical_offset == file_offset) {
8366                 ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
8367                                                 file_offset);
8368                 if (ret)
8369                         return ret;
8370         }
8371
8372         if (bio == dip->orig_bio)
8373                 return 0;
8374
8375         file_offset -= dip->logical_offset;
8376         file_offset >>= inode->i_sb->s_blocksize_bits;
8377         io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8378
8379         return 0;
8380 }
8381
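/*
 * Submit one DIO bio fragment. Writes either get their checksums
 * computed here or are handed to the async submission workers; reads get
 * their expected checksums looked up and bound to the bio. Inodes with
 * NODATASUM set skip the checksum steps entirely.
 */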
8382 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
8383                 struct inode *inode, u64 file_offset, int async_submit)
8384 {
8385         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8386         struct btrfs_dio_private *dip = bio->bi_private;
8387         bool write = bio_op(bio) == REQ_OP_WRITE;
8388         blk_status_t ret;
8389
8390         /* Check btrfs_submit_bio_hook() for rules about async submit. */
8391         if (async_submit)
8392                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8393
8394         if (!write) {
8395                 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
8396                 if (ret)
8397                         goto err;
8398         }
8399
8400         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
8401                 goto map;
8402
8403         if (write && async_submit) {
8404                 ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
8405                                           file_offset, inode,
8406                                           btrfs_submit_bio_start_direct_io);
8407                 goto err;
8408         } else if (write) {
8409                 /*
8410                  * If we aren't doing async submit, calculate the csum of the
8411                  * bio now.
8412                  */
8413                 ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
8414                 if (ret)
8415                         goto err;
8416         } else {
8417                 ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
8418                                                      file_offset);
8419                 if (ret)
8420                         goto err;
8421         }
8422 map:
8423         ret = btrfs_map_bio(fs_info, bio, 0, 0);
8424 err:
8425         return ret;
8426 }
8427
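/*
 * Split the original DIO bio into clones no longer than the mapped
 * length that btrfs_map_block() reports for each starting sector, and
 * submit each piece. If the whole bio maps contiguously, it is submitted
 * as is.
 */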
8428 static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
8429 {
8430         struct inode *inode = dip->inode;
8431         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8432         struct bio *bio;
8433         struct bio *orig_bio = dip->orig_bio;
8434         u64 start_sector = orig_bio->bi_iter.bi_sector;
8435         u64 file_offset = dip->logical_offset;
8436         u64 map_length;
8437         int async_submit = 0;
8438         u64 submit_len;
8439         int clone_offset = 0;
8440         int clone_len;
8441         int ret;
8442         blk_status_t status;
8443
8444         map_length = orig_bio->bi_iter.bi_size;
8445         submit_len = map_length;
8446         ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), start_sector << 9,
8447                               &map_length, NULL, 0);
8448         if (ret)
8449                 return -EIO;
8450
8451         if (map_length >= submit_len) {
8452                 bio = orig_bio;
8453                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8454                 goto submit;
8455         }
8456
8457         /* async crcs make it difficult to collect full stripe writes. */
8458         if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8459                 async_submit = 0;
8460         else
8461                 async_submit = 1;
8462
8463         /* bio split */
8464         ASSERT(map_length <= INT_MAX);
8465         do {
8466                 clone_len = min_t(int, submit_len, map_length);
8467
8468                 /*
8469                  * This will never fail as it's passing GFP_NOFS and
8470                  * the allocation is backed by btrfs_bioset.
8471                  */
8472                 bio = btrfs_bio_clone_partial(orig_bio, clone_offset,
8473                                               clone_len);
8474                 bio->bi_private = dip;
8475                 bio->bi_end_io = btrfs_end_dio_bio;
8476                 btrfs_io_bio(bio)->logical = file_offset;
8477
8478                 ASSERT(submit_len >= clone_len);
8479                 submit_len -= clone_len;
8480                 if (submit_len == 0)
8481                         break;
8482
8483                 /*
8484                  * Increase the count before we submit the bio so we know
8485                  * the end IO handler won't happen before we increase the
8486                  * count. Otherwise, the dip might get freed before we're
8487                  * done setting it up.
8488                  */
8489                 atomic_inc(&dip->pending_bios);
8490
8491                 status = btrfs_submit_dio_bio(bio, inode, file_offset,
8492                                                 async_submit);
8493                 if (status) {
8494                         bio_put(bio);
8495                         atomic_dec(&dip->pending_bios);
8496                         goto out_err;
8497                 }
8498
8499                 clone_offset += clone_len;
8500                 start_sector += clone_len >> 9;
8501                 file_offset += clone_len;
8502
8503                 map_length = submit_len;
8504                 ret = btrfs_map_block(fs_info, btrfs_op(orig_bio),
8505                                       start_sector << 9, &map_length, NULL, 0);
8506                 if (ret)
8507                         goto out_err;
8508         } while (submit_len > 0);
8509
8510 submit:
8511         status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit);
8512         if (!status)
8513                 return 0;
8514
8515         if (bio != orig_bio)
8516                 bio_put(bio);
8517 out_err:
8518         dip->errors = 1;
8519         /*
8520          * Before the atomic variable goes to zero, we must make sure
8521          * dip->errors is perceived to be set. This ordering is ensured by the
8522          * fact that atomic operations with a return value are fully ordered
8523          * as per atomic_t.txt.
8524          */
8525         if (atomic_dec_and_test(&dip->pending_bios))
8526                 bio_io_error(dip->orig_bio);
8527
8528         /* bio_end_io() will handle error, so we needn't return it */
8529         return 0;
8530 }
8531
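/*
 * Entry point handed to __blockdev_direct_IO(): wrap the dio_bio in a
 * btrfs_dio_private, pick the read or write completion path, and submit.
 * The free_ordered label below handles every partial-setup failure so
 * the generic DIO code never sees a half-initialized request.
 */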
8532 static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
8533                                 loff_t file_offset)
8534 {
8535         struct btrfs_dio_private *dip = NULL;
8536         struct bio *bio = NULL;
8537         struct btrfs_io_bio *io_bio;
8538         bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
8539         int ret = 0;
8540
8541         bio = btrfs_bio_clone(dio_bio);
8542
8543         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8544         if (!dip) {
8545                 ret = -ENOMEM;
8546                 goto free_ordered;
8547         }
8548
8549         dip->private = dio_bio->bi_private;
8550         dip->inode = inode;
8551         dip->logical_offset = file_offset;
8552         dip->bytes = dio_bio->bi_iter.bi_size;
8553         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8554         bio->bi_private = dip;
8555         dip->orig_bio = bio;
8556         dip->dio_bio = dio_bio;
8557         atomic_set(&dip->pending_bios, 1);
8558         io_bio = btrfs_io_bio(bio);
8559         io_bio->logical = file_offset;
8560
8561         if (write) {
8562                 bio->bi_end_io = btrfs_endio_direct_write;
8563         } else {
8564                 bio->bi_end_io = btrfs_endio_direct_read;
8565                 dip->subio_endio = btrfs_subio_endio_read;
8566         }
8567
8568         /*
8569          * Reset the range for unsubmitted ordered extents (to a 0 length range)
8570          * even if we fail to submit a bio, because in such case we do the
8571          * even if we fail to submit a bio, because in that case we do the
8572          * time by btrfs_direct_IO().
8573          */
8574         if (write) {
8575                 struct btrfs_dio_data *dio_data = current->journal_info;
8576
8577                 dio_data->unsubmitted_oe_range_end = dip->logical_offset +
8578                         dip->bytes;
8579                 dio_data->unsubmitted_oe_range_start =
8580                         dio_data->unsubmitted_oe_range_end;
8581         }
8582
8583         ret = btrfs_submit_direct_hook(dip);
8584         if (!ret)
8585                 return;
8586
8587         if (io_bio->end_io)
8588                 io_bio->end_io(io_bio, ret);
8589
8590 free_ordered:
8591         /*
8592          * If we arrived here it means we either failed to submit the dip,
8593          * failed to clone the dio_bio, or failed to allocate the dip. If we
8594          * cloned the dio_bio and allocated the dip, we can just call bio_endio
8595          * against our io_bio so that we get proper resource cleanup if we fail
8596          * to submit the dip. Otherwise, we must do the same as
8597          * btrfs_endio_direct_[write|read] because we can't call these
8598          * callbacks - they require an allocated dip and a clone of dio_bio.
8599          */
8600         if (bio && dip) {
8601                 bio_io_error(bio);
8602                 /*
8603                  * The end io callbacks free our dip, do the final put on bio
8604                  * and all the cleanup and final put for dio_bio (through
8605                  * dio_end_io()).
8606                  */
8607                 dip = NULL;
8608                 bio = NULL;
8609         } else {
8610                 if (write)
8611                         __endio_write_update_ordered(inode,
8612                                                 file_offset,
8613                                                 dio_bio->bi_iter.bi_size,
8614                                                 false);
8615                 else
8616                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8617                               file_offset + dio_bio->bi_iter.bi_size - 1);
8618
8619                 dio_bio->bi_status = BLK_STS_IOERR;
8620                 /*
8621                  * Releases and cleans up our dio_bio, no need to bio_put()
8622                  * nor bio_endio()/bio_io_error() against dio_bio.
8623                  */
8624                 dio_end_io(dio_bio);
8625         }
8626         if (bio)
8627                 bio_put(bio);
8628         kfree(dip);
8629 }
8630
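/*
 * Validate a DIO request: the file offset and the memory alignment of
 * the iovecs must both be sector aligned, and reads reject duplicate
 * iov_base pointers, which would otherwise cause spurious checksum
 * failures when the data is read back.
 */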
8631 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
8632                                const struct iov_iter *iter, loff_t offset)
8633 {
8634         int seg;
8635         int i;
8636         unsigned int blocksize_mask = fs_info->sectorsize - 1;
8637         ssize_t retval = -EINVAL;
8638
8639         if (offset & blocksize_mask)
8640                 goto out;
8641
8642         if (iov_iter_alignment(iter) & blocksize_mask)
8643                 goto out;
8644
8645         /* If this is a write we don't need to check any further */
8646         if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
8647                 return 0;
8648         /*
8649          * Check to make sure we don't have duplicate iov_base's in this
8650          * iovec; if so return -EINVAL, otherwise we'll get csum errors
8651          * when reading back.
8652          */
8653         for (seg = 0; seg < iter->nr_segs; seg++) {
8654                 for (i = seg + 1; i < iter->nr_segs; i++) {
8655                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8656                                 goto out;
8657                 }
8658         }
8659         retval = 0;
8660 out:
8661         return retval;
8662 }
8663
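/*
 * Our ->direct_IO implementation. For writes we reserve space up front
 * and stash the bookkeeping in current->journal_info (as a struct
 * btrfs_dio_data) so the block-mapping callbacks can find it; any
 * reservation or ordered extents left over after a short or failed write
 * are cleaned up before returning.
 */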
8664 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8665 {
8666         struct file *file = iocb->ki_filp;
8667         struct inode *inode = file->f_mapping->host;
8668         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8669         struct btrfs_dio_data dio_data = { 0 };
8670         struct extent_changeset *data_reserved = NULL;
8671         loff_t offset = iocb->ki_pos;
8672         size_t count = 0;
8673         int flags = 0;
8674         bool wakeup = true;
8675         bool relock = false;
8676         ssize_t ret;
8677
8678         if (check_direct_IO(fs_info, iter, offset))
8679                 return 0;
8680
8681         inode_dio_begin(inode);
8682
8683         /*
8684          * The generic stuff only does filemap_write_and_wait_range, which
8685          * isn't enough if we've written compressed pages to this area, so
8686          * we need to flush the dirty pages again to make absolutely sure
8687          * that any outstanding dirty pages are on disk.
8688          */
8689         count = iov_iter_count(iter);
8690         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8691                      &BTRFS_I(inode)->runtime_flags))
8692                 filemap_fdatawrite_range(inode->i_mapping, offset,
8693                                          offset + count - 1);
8694
8695         if (iov_iter_rw(iter) == WRITE) {
8696                 /*
8697                  * If the write DIO is beyond the EOF, we need to update
8698                  * the isize, but it is protected by i_mutex. So we cannot
8699                  * unlock the i_mutex in this case.
8700                  */
8701                 if (offset + count <= inode->i_size) {
8702                         dio_data.overwrite = 1;
8703                         inode_unlock(inode);
8704                         relock = true;
8705                 }
8706                 ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
8707                                                    offset, count);
8708                 if (ret)
8709                         goto out;
8710
8711                 /*
8712                  * We need to know how many extents we reserved so that we can
8713                  * do the accounting properly if we go over the number we
8714                  * originally calculated.  Abuse current->journal_info for this.
8715                  */
8716                 dio_data.reserve = round_up(count,
8717                                             fs_info->sectorsize);
8718                 dio_data.unsubmitted_oe_range_start = (u64)offset;
8719                 dio_data.unsubmitted_oe_range_end = (u64)offset;
8720                 current->journal_info = &dio_data;
8721                 down_read(&BTRFS_I(inode)->dio_sem);
8722         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8723                                      &BTRFS_I(inode)->runtime_flags)) {
8724                 inode_dio_end(inode);
8725                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8726                 wakeup = false;
8727         }
8728
8729         ret = __blockdev_direct_IO(iocb, inode,
8730                                    fs_info->fs_devices->latest_bdev,
8731                                    iter, btrfs_get_blocks_direct, NULL,
8732                                    btrfs_submit_direct, flags);
8733         if (iov_iter_rw(iter) == WRITE) {
8734                 up_read(&BTRFS_I(inode)->dio_sem);
8735                 current->journal_info = NULL;
8736                 if (ret < 0 && ret != -EIOCBQUEUED) {
8737                         if (dio_data.reserve)
8738                                 btrfs_delalloc_release_space(inode, data_reserved,
8739                                         offset, dio_data.reserve, true);
8740                         /*
8741                          * On error we might have left some ordered extents
8742                          * without submitting corresponding bios for them, so
8743                          * clean them up to avoid other tasks getting them
8744                          * and waiting for them to complete forever.
8745                          */
8746                         if (dio_data.unsubmitted_oe_range_start <
8747                             dio_data.unsubmitted_oe_range_end)
8748                                 __endio_write_update_ordered(inode,
8749                                         dio_data.unsubmitted_oe_range_start,
8750                                         dio_data.unsubmitted_oe_range_end -
8751                                         dio_data.unsubmitted_oe_range_start,
8752                                         false);
8753                 } else if (ret >= 0 && (size_t)ret < count)
8754                         btrfs_delalloc_release_space(inode, data_reserved,
8755                                         offset, count - (size_t)ret, true);
8756                 btrfs_delalloc_release_extents(BTRFS_I(inode), count);
8757         }
8758 out:
8759         if (wakeup)
8760                 inode_dio_end(inode);
8761         if (relock)
8762                 inode_lock(inode);
8763
8764         extent_changeset_free(data_reserved);
8765         return ret;
8766 }
8767
8768 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8769
8770 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8771                 __u64 start, __u64 len)
8772 {
8773         int     ret;
8774
8775         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8776         if (ret)
8777                 return ret;
8778
8779         return extent_fiemap(inode, fieinfo, start, len);
8780 }
8781
8782 int btrfs_readpage(struct file *file, struct page *page)
8783 {
8784         struct extent_io_tree *tree;
8785         tree = &BTRFS_I(page->mapping->host)->io_tree;
8786         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8787 }
8788
8789 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8790 {
8791         struct inode *inode = page->mapping->host;
8792         int ret;
8793
8794         if (current->flags & PF_MEMALLOC) {
8795                 redirty_page_for_writepage(wbc, page);
8796                 unlock_page(page);
8797                 return 0;
8798         }
8799
8800         /*
8801          * If we are under memory pressure we will call this directly from the
8802          * VM, so we need to make sure we have the inode referenced for the
8803          * ordered extent.  If not, just return like we didn't do anything.
8804          */
8805         if (!igrab(inode)) {
8806                 redirty_page_for_writepage(wbc, page);
8807                 return AOP_WRITEPAGE_ACTIVATE;
8808         }
8809         ret = extent_write_full_page(page, wbc);
8810         btrfs_add_delayed_iput(inode);
8811         return ret;
8812 }
8813
8814 static int btrfs_writepages(struct address_space *mapping,
8815                             struct writeback_control *wbc)
8816 {
8817         return extent_writepages(mapping, wbc);
8818 }
8819
8820 static int
8821 btrfs_readpages(struct file *file, struct address_space *mapping,
8822                 struct list_head *pages, unsigned nr_pages)
8823 {
8824         return extent_readpages(mapping, pages, nr_pages);
8825 }
8826
8827 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8828 {
8829         int ret = try_release_extent_mapping(page, gfp_flags);
8830         if (ret == 1) {
8831                 ClearPagePrivate(page);
8832                 set_page_private(page, 0);
8833                 put_page(page);
8834         }
8835         return ret;
8836 }
8837
8838 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8839 {
8840         if (PageWriteback(page) || PageDirty(page))
8841                 return 0;
8842         return __btrfs_releasepage(page, gfp_flags);
8843 }
8844
8845 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8846                                  unsigned int length)
8847 {
8848         struct inode *inode = page->mapping->host;
8849         struct extent_io_tree *tree;
8850         struct btrfs_ordered_extent *ordered;
8851         struct extent_state *cached_state = NULL;
8852         u64 page_start = page_offset(page);
8853         u64 page_end = page_start + PAGE_SIZE - 1;
8854         u64 start;
8855         u64 end;
8856         int inode_evicting = inode->i_state & I_FREEING;
8857
8858         /*
8859          * we have the page locked, so new writeback can't start,
8860          * and the dirty bit won't be cleared while we are here.
8861          *
8862          * Wait for IO on this page so that we can safely clear
8863          * the PagePrivate2 bit and do ordered accounting
8864          */
8865         wait_on_page_writeback(page);
8866
8867         tree = &BTRFS_I(inode)->io_tree;
8868         if (offset) {
8869                 btrfs_releasepage(page, GFP_NOFS);
8870                 return;
8871         }
8872
8873         if (!inode_evicting)
8874                 lock_extent_bits(tree, page_start, page_end, &cached_state);
8875 again:
8876         start = page_start;
8877         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
8878                                         page_end - start + 1);
8879         if (ordered) {
8880                 end = min(page_end, ordered->file_offset + ordered->len - 1);
8881                 /*
8882                  * IO on this page will never be started, so we need
8883                  * to account for any ordered extents now
8884                  */
8885                 if (!inode_evicting)
8886                         clear_extent_bit(tree, start, end,
8887                                          EXTENT_DIRTY | EXTENT_DELALLOC |
8888                                          EXTENT_DELALLOC_NEW |
8889                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8890                                          EXTENT_DEFRAG, 1, 0, &cached_state);
8891                 /*
8892                  * whoever cleared the private bit is responsible
8893                  * for the finish_ordered_io
8894                  */
8895                 if (TestClearPagePrivate2(page)) {
8896                         struct btrfs_ordered_inode_tree *tree;
8897                         u64 new_len;
8898
8899                         tree = &BTRFS_I(inode)->ordered_tree;
8900
8901                         spin_lock_irq(&tree->lock);
8902                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8903                         new_len = start - ordered->file_offset;
8904                         if (new_len < ordered->truncated_len)
8905                                 ordered->truncated_len = new_len;
8906                         spin_unlock_irq(&tree->lock);
8907
8908                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8909                                                            start,
8910                                                            end - start + 1, 1))
8911                                 btrfs_finish_ordered_io(ordered);
8912                 }
8913                 btrfs_put_ordered_extent(ordered);
8914                 if (!inode_evicting) {
8915                         cached_state = NULL;
8916                         lock_extent_bits(tree, start, end,
8917                                          &cached_state);
8918                 }
8919
8920                 start = end + 1;
8921                 if (start < page_end)
8922                         goto again;
8923         }
8924
8925         /*
8926          * Qgroup reserved space handler
8927          * Page here will be either
8928          * 1) Already written to disk or ordered extent already submitted
8929          *    Then its QGROUP_RESERVED bit in io_tree is already cleaned.
8930          *    Qgroup will be handled by its qgroup_record then.
8931          *    btrfs_qgroup_free_data() call will do nothing here.
8932          *
8933          * 2) Not written to disk yet
8934          *    Then the btrfs_qgroup_free_data() call will clear the
8935          *    QGROUP_RESERVED bit of its io_tree and free the qgroup reserved
8936          *    data space, since the IO will never happen for this page.
8937          */
8938         btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
8939         if (!inode_evicting) {
8940                 clear_extent_bit(tree, page_start, page_end,
8941                                  EXTENT_LOCKED | EXTENT_DIRTY |
8942                                  EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
8943                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
8944                                  &cached_state);
8945
8946                 __btrfs_releasepage(page, GFP_NOFS);
8947         }
8948
8949         ClearPageChecked(page);
8950         if (PagePrivate(page)) {
8951                 ClearPagePrivate(page);
8952                 set_page_private(page, 0);
8953                 put_page(page);
8954         }
8955 }
8956
8957 /*
8958  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8959  * called from a page fault handler when a page is first dirtied. Hence we must
8960  * be careful to check for EOF conditions here. We set the page up correctly
8961  * for a written page which means we get ENOSPC checking when writing into
8962  * holes and correct delalloc and unwritten extent mapping on filesystems that
8963  * support these features.
8964  *
8965  * We are not allowed to take the i_mutex here so we have to play games to
8966  * protect against truncate races as the page could now be beyond EOF.  Because
8967  * truncate_setsize() writes the inode size before removing pages, once we have
8968  * the page lock we can determine safely if the page is beyond EOF. If it is not
8969  * beyond EOF, then the page is guaranteed safe against truncation until we
8970  * unlock the page.
8971  */
8972 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
8973 {
8974         struct page *page = vmf->page;
8975         struct inode *inode = file_inode(vmf->vma->vm_file);
8976         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8977         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8978         struct btrfs_ordered_extent *ordered;
8979         struct extent_state *cached_state = NULL;
8980         struct extent_changeset *data_reserved = NULL;
8981         char *kaddr;
8982         unsigned long zero_start;
8983         loff_t size;
8984         vm_fault_t ret;
8985         int ret2;
8986         int reserved = 0;
8987         u64 reserved_space;
8988         u64 page_start;
8989         u64 page_end;
8990         u64 end;
8991
8992         reserved_space = PAGE_SIZE;
8993
8994         sb_start_pagefault(inode->i_sb);
8995         page_start = page_offset(page);
8996         page_end = page_start + PAGE_SIZE - 1;
8997         end = page_end;
8998
8999         /*
9000          * Reserving delalloc space after obtaining the page lock can lead to
9001          * deadlock. For example, if a dirty page is locked by this function
9002          * and the call to btrfs_delalloc_reserve_space() ends up triggering
9003          * dirty page write out, then the btrfs_writepage() function could
9004          * end up waiting indefinitely to get a lock on the page currently
9005          * being processed by btrfs_page_mkwrite() function.
9006          */
9007         ret2 = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
9008                                            reserved_space);
9009         if (!ret2) {
9010                 ret2 = file_update_time(vmf->vma->vm_file);
9011                 reserved = 1;
9012         }
9013         if (ret2) {
9014                 ret = vmf_error(ret2);
9015                 if (reserved)
9016                         goto out;
9017                 goto out_noreserve;
9018         }
9019
9020         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
9021 again:
9022         lock_page(page);
9023         size = i_size_read(inode);
9024
9025         if ((page->mapping != inode->i_mapping) ||
9026             (page_start >= size)) {
9027                 /* page got truncated out from underneath us */
9028                 goto out_unlock;
9029         }
9030         wait_on_page_writeback(page);
9031
9032         lock_extent_bits(io_tree, page_start, page_end, &cached_state);
9033         set_page_extent_mapped(page);
9034
9035         /*
9036          * we can't set the delalloc bits if there are pending ordered
9037          * extents.  Drop our locks and wait for them to finish
9038          */
9039         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
9040                         PAGE_SIZE);
9041         if (ordered) {
9042                 unlock_extent_cached(io_tree, page_start, page_end,
9043                                      &cached_state);
9044                 unlock_page(page);
9045                 btrfs_start_ordered_extent(inode, ordered, 1);
9046                 btrfs_put_ordered_extent(ordered);
9047                 goto again;
9048         }
9049
9050         if (page->index == ((size - 1) >> PAGE_SHIFT)) {
9051                 reserved_space = round_up(size - page_start,
9052                                           fs_info->sectorsize);
9053                 if (reserved_space < PAGE_SIZE) {
9054                         end = page_start + reserved_space - 1;
9055                         btrfs_delalloc_release_space(inode, data_reserved,
9056                                         page_start, PAGE_SIZE - reserved_space,
9057                                         true);
9058                 }
9059         }
9060
9061         /*
9062          * page_mkwrite gets called when the page is first dirtied after it's
9063          * faulted in, but write(2) could also dirty a page and set delalloc
9064          * bits, thus in this case for space accounting reasons we still need to
9065          * clear any delalloc bits within this page range since we have to
9066          * reserve data&meta space before lock_page() (see above comments).
9067          */
9068         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
9069                           EXTENT_DIRTY | EXTENT_DELALLOC |
9070                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
9071                           0, 0, &cached_state);
9072
9073         ret2 = btrfs_set_extent_delalloc(inode, page_start, end, 0,
9074                                         &cached_state, 0);
9075         if (ret2) {
9076                 unlock_extent_cached(io_tree, page_start, page_end,
9077                                      &cached_state);
9078                 ret = VM_FAULT_SIGBUS;
9079                 goto out_unlock;
9080         }
9081         ret2 = 0;
9082
9083         /* page is wholly or partially inside EOF */
9084         if (page_start + PAGE_SIZE > size)
9085                 zero_start = size & ~PAGE_MASK;
9086         else
9087                 zero_start = PAGE_SIZE;
9088
9089         if (zero_start != PAGE_SIZE) {
9090                 kaddr = kmap(page);
9091                 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
9092                 flush_dcache_page(page);
9093                 kunmap(page);
9094         }
9095         ClearPageChecked(page);
9096         set_page_dirty(page);
9097         SetPageUptodate(page);
9098
9099         BTRFS_I(inode)->last_trans = fs_info->generation;
9100         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
9101         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
9102
9103         unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
9104
9105         if (!ret2) {
9106                 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
9107                 sb_end_pagefault(inode->i_sb);
9108                 extent_changeset_free(data_reserved);
9109                 return VM_FAULT_LOCKED;
9110         }
9111
9112 out_unlock:
9113         unlock_page(page);
9114 out:
9115         btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
9116         btrfs_delalloc_release_space(inode, data_reserved, page_start,
9117                                      reserved_space, (ret != 0));
9118 out_noreserve:
9119         sb_end_pagefault(inode->i_sb);
9120         extent_changeset_free(data_reserved);
9121         return ret;
9122 }
9123
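/*
 * Truncate inode items in batches, restarting the transaction whenever
 * btrfs_truncate_inode_items() returns -ENOSPC or -EAGAIN, so that the
 * per-transaction reservation stays bounded no matter how large the
 * inode is.
 */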
9124 static int btrfs_truncate(struct inode *inode, bool skip_writeback)
9125 {
9126         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9127         struct btrfs_root *root = BTRFS_I(inode)->root;
9128         struct btrfs_block_rsv *rsv;
9129         int ret;
9130         struct btrfs_trans_handle *trans;
9131         u64 mask = fs_info->sectorsize - 1;
9132         u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
9133
9134         if (!skip_writeback) {
9135                 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
9136                                                (u64)-1);
9137                 if (ret)
9138                         return ret;
9139         }
9140
9141         /*
9142          * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
9143          * things going on here:
9144          *
9145          * 1) We need to reserve space to update our inode.
9146          *
9147          * 2) We need to have something to cache all the space that is going to
9148          * be freed up by the truncate operation, but also have some slack
9149          * space reserved in case it uses space during the truncate (thank you
9150          * very much snapshotting).
9151          *
9152          * And we need these to be separate.  The fact is we can use a lot of
9153          * space doing the truncate, and we have no earthly idea how much space
9154          * we will use, so we need the truncate reservation to be separate so it
9155          * doesn't end up using space reserved for updating the inode.  We also
9156          * need to be able to stop the transaction and start a new one, which
9157          * means we need to be able to update the inode several times, and we
9158          * have no way of knowing how many times that will be, so we can't just
9159          * reserve 1 item for the entirety of the operation, so that has to be
9160          * done separately as well.
9161          *
9162          * So that leaves us with
9163          *
9164          * 1) rsv - for the truncate reservation, which we will steal from the
9165          * transaction reservation.
9166          * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
9167          * updating the inode.
9168          */
9169         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
9170         if (!rsv)
9171                 return -ENOMEM;
9172         rsv->size = min_size;
9173         rsv->failfast = 1;
9174
9175         /*
9176          * 1 for the truncate slack space
9177          * 1 for updating the inode.
9178          */
9179         trans = btrfs_start_transaction(root, 2);
9180         if (IS_ERR(trans)) {
9181                 ret = PTR_ERR(trans);
9182                 goto out;
9183         }
9184
9185         /* Migrate the slack space for the truncate to our reserve */
9186         ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
9187                                       min_size, 0);
9188         BUG_ON(ret);
9189
9190         /*
9191          * So if we truncate and then write and fsync we normally would just
9192          * write the extents that changed, which is a problem if we need to
9193          * first truncate that entire inode.  So set this flag so we write out
9194          * all of the extents in the inode to the sync log so we're completely
9195          * safe.
9196          */
9197         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
9198         trans->block_rsv = rsv;
9199
9200         while (1) {
9201                 ret = btrfs_truncate_inode_items(trans, root, inode,
9202                                                  inode->i_size,
9203                                                  BTRFS_EXTENT_DATA_KEY);
9204                 trans->block_rsv = &fs_info->trans_block_rsv;
9205                 if (ret != -ENOSPC && ret != -EAGAIN)
9206                         break;
9207
9208                 ret = btrfs_update_inode(trans, root, inode);
9209                 if (ret)
9210                         break;
9211
9212                 btrfs_end_transaction(trans);
9213                 btrfs_btree_balance_dirty(fs_info);
9214
9215                 trans = btrfs_start_transaction(root, 2);
9216                 if (IS_ERR(trans)) {
9217                         ret = PTR_ERR(trans);
9218                         trans = NULL;
9219                         break;
9220                 }
9221
9222                 btrfs_block_rsv_release(fs_info, rsv, -1);
9223                 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
9224                                               rsv, min_size, 0);
9225                 BUG_ON(ret);    /* shouldn't happen */
9226                 trans->block_rsv = rsv;
9227         }
9228
9229         /*
9230          * We can't call btrfs_truncate_block inside a trans handle as we could
9231          * deadlock with freeze. If we got NEED_TRUNCATE_BLOCK then we know
9232          * we've truncated everything except the last little bit, and can do
9233          * btrfs_truncate_block and then update the disk_i_size.
9234          */
9235         if (ret == NEED_TRUNCATE_BLOCK) {
9236                 btrfs_end_transaction(trans);
9237                 btrfs_btree_balance_dirty(fs_info);
9238
9239                 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
9240                 if (ret)
9241                         goto out;
9242                 trans = btrfs_start_transaction(root, 1);
9243                 if (IS_ERR(trans)) {
9244                         ret = PTR_ERR(trans);
9245                         goto out;
9246                 }
9247                 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
9248         }
9249
9250         if (trans) {
9251                 int ret2;
9252
9253                 trans->block_rsv = &fs_info->trans_block_rsv;
9254                 ret2 = btrfs_update_inode(trans, root, inode);
9255                 if (ret2 && !ret)
9256                         ret = ret2;
9257
9258                 ret2 = btrfs_end_transaction(trans);
9259                 if (ret2 && !ret)
9260                         ret = ret2;
9261                 btrfs_btree_balance_dirty(fs_info);
9262         }
9263 out:
9264         btrfs_free_block_rsv(fs_info, rsv);
9265
9266         return ret;
9267 }
9268
9269 /*
9270  * create a new subvolume directory/inode (helper for the ioctl).
9271  */
9272 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
9273                              struct btrfs_root *new_root,
9274                              struct btrfs_root *parent_root,
9275                              u64 new_dirid)
9276 {
9277         struct inode *inode;
9278         int err;
9279         u64 index = 0;
9280
9281         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
9282                                 new_dirid, new_dirid,
9283                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
9284                                 &index);
9285         if (IS_ERR(inode))
9286                 return PTR_ERR(inode);
9287         inode->i_op = &btrfs_dir_inode_operations;
9288         inode->i_fop = &btrfs_dir_file_operations;
9289
9290         set_nlink(inode, 1);
9291         btrfs_i_size_write(BTRFS_I(inode), 0);
9292         unlock_new_inode(inode);
9293
9294         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
9295         if (err)
9296                 btrfs_err(new_root->fs_info,
9297                           "error inheriting subvolume %llu properties: %d",
9298                           new_root->root_key.objectid, err);
9299
9300         err = btrfs_update_inode(trans, new_root, inode);
9301
9302         iput(inode);
9303         return err;
9304 }
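/*
 * A note on the mode computed above: the new subvolume root is a plain
 * directory whose permissions are 0777 filtered by the caller's umask,
 * e.g. with the common umask of 022 the result is S_IFDIR | 0755.
 */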
9305
9306 struct inode *btrfs_alloc_inode(struct super_block *sb)
9307 {
9308         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
9309         struct btrfs_inode *ei;
9310         struct inode *inode;
9311
9312         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL);
9313         if (!ei)
9314                 return NULL;
9315
9316         ei->root = NULL;
9317         ei->generation = 0;
9318         ei->last_trans = 0;
9319         ei->last_sub_trans = 0;
9320         ei->logged_trans = 0;
9321         ei->delalloc_bytes = 0;
9322         ei->new_delalloc_bytes = 0;
9323         ei->defrag_bytes = 0;
9324         ei->disk_i_size = 0;
9325         ei->flags = 0;
9326         ei->csum_bytes = 0;
9327         ei->index_cnt = (u64)-1;
9328         ei->dir_index = 0;
9329         ei->last_unlink_trans = 0;
9330         ei->last_link_trans = 0;
9331         ei->last_log_commit = 0;
9332
9333         spin_lock_init(&ei->lock);
9334         ei->outstanding_extents = 0;
9335         if (sb->s_magic != BTRFS_TEST_MAGIC)
9336                 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
9337                                               BTRFS_BLOCK_RSV_DELALLOC);
9338         ei->runtime_flags = 0;
9339         ei->prop_compress = BTRFS_COMPRESS_NONE;
9340         ei->defrag_compress = BTRFS_COMPRESS_NONE;
9341
9342         ei->delayed_node = NULL;
9343
9344         ei->i_otime.tv_sec = 0;
9345         ei->i_otime.tv_nsec = 0;
9346
9347         inode = &ei->vfs_inode;
9348         extent_map_tree_init(&ei->extent_tree);
9349         extent_io_tree_init(&ei->io_tree, inode);
9350         extent_io_tree_init(&ei->io_failure_tree, inode);
9351         ei->io_tree.track_uptodate = 1;
9352         ei->io_failure_tree.track_uptodate = 1;
9353         atomic_set(&ei->sync_writers, 0);
9354         mutex_init(&ei->log_mutex);
9355         mutex_init(&ei->delalloc_mutex);
9356         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9357         INIT_LIST_HEAD(&ei->delalloc_inodes);
9358         INIT_LIST_HEAD(&ei->delayed_iput);
9359         RB_CLEAR_NODE(&ei->rb_node);
9360         init_rwsem(&ei->dio_sem);
9361
9362         return inode;
9363 }
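/*
 * btrfs_alloc_inode() pairs with btrfs_destroy_inode() below through the
 * super_operations ->alloc_inode/->destroy_inode hooks; the actual freeing
 * is deferred to an RCU callback (btrfs_i_callback) so that lockless
 * RCU-walk path lookups can still safely dereference the inode.
 */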
9364
9365 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9366 void btrfs_test_destroy_inode(struct inode *inode)
9367 {
9368         btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9369         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9370 }
9371 #endif
9372
9373 static void btrfs_i_callback(struct rcu_head *head)
9374 {
9375         struct inode *inode = container_of(head, struct inode, i_rcu);
9376         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9377 }
9378
9379 void btrfs_destroy_inode(struct inode *inode)
9380 {
9381         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9382         struct btrfs_ordered_extent *ordered;
9383         struct btrfs_root *root = BTRFS_I(inode)->root;
9384
9385         WARN_ON(!hlist_empty(&inode->i_dentry));
9386         WARN_ON(inode->i_data.nrpages);
9387         WARN_ON(BTRFS_I(inode)->block_rsv.reserved);
9388         WARN_ON(BTRFS_I(inode)->block_rsv.size);
9389         WARN_ON(BTRFS_I(inode)->outstanding_extents);
9390         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9391         WARN_ON(BTRFS_I(inode)->new_delalloc_bytes);
9392         WARN_ON(BTRFS_I(inode)->csum_bytes);
9393         WARN_ON(BTRFS_I(inode)->defrag_bytes);
9394
9395         /*
9396          * This can happen when we create an inode, but somebody else also
9397          * created the same inode and we need to destroy the one we already
9398          * created.
9399          */
9400         if (!root)
9401                 goto free;
9402
9403         while (1) {
9404                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9405                 if (!ordered)
9406                         break;
9407                 else {
9408                         btrfs_err(fs_info,
9409                                   "found ordered extent %llu %llu on inode cleanup",
9410                                   ordered->file_offset, ordered->len);
9411                         btrfs_remove_ordered_extent(inode, ordered);
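                        /* put twice: once for the lookup ref, once to free it */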
9412                         btrfs_put_ordered_extent(ordered);
9413                         btrfs_put_ordered_extent(ordered);
9414                 }
9415         }
9416         btrfs_qgroup_check_reserved_leak(inode);
9417         inode_tree_del(inode);
9418         btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9419 free:
9420         call_rcu(&inode->i_rcu, btrfs_i_callback);
9421 }
9422
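/*
 * Returning 1 here tells the VFS to evict the inode right away instead of
 * caching it: inodes without a root, or whose subvolume is being deleted,
 * must not stay around in the inode cache.
 */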
9423 int btrfs_drop_inode(struct inode *inode)
9424 {
9425         struct btrfs_root *root = BTRFS_I(inode)->root;
9426
9427         if (root == NULL)
9428                 return 1;
9429
9430         /* the snap/subvol tree is being deleted */
9431         if (btrfs_root_refs(&root->root_item) == 0)
9432                 return 1;
9433         else
9434                 return generic_drop_inode(inode);
9435 }
9436
9437 static void init_once(void *foo)
9438 {
9439         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9440
9441         inode_init_once(&ei->vfs_inode);
9442 }
9443
9444 void __cold btrfs_destroy_cachep(void)
9445 {
9446         /*
9447          * Make sure all delayed rcu free inodes are flushed before we
9448          * destroy cache.
9449          */
9450         rcu_barrier();
9451         kmem_cache_destroy(btrfs_inode_cachep);
9452         kmem_cache_destroy(btrfs_trans_handle_cachep);
9453         kmem_cache_destroy(btrfs_path_cachep);
9454         kmem_cache_destroy(btrfs_free_space_cachep);
9455         kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
9456 }
9457
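/*
 * SLAB_RECLAIM_ACCOUNT makes the inode cache count as reclaimable in the
 * VM's accounting and SLAB_ACCOUNT charges the allocations to the memory
 * cgroup of the caller. The free space bitmap cache uses PAGE_SIZE objects
 * with PAGE_SIZE alignment so that each bitmap occupies exactly one page.
 */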
9458 int __init btrfs_init_cachep(void)
9459 {
9460         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9461                         sizeof(struct btrfs_inode), 0,
9462                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
9463                         init_once);
9464         if (!btrfs_inode_cachep)
9465                 goto fail;
9466
9467         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9468                         sizeof(struct btrfs_trans_handle), 0,
9469                         SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9470         if (!btrfs_trans_handle_cachep)
9471                 goto fail;
9472
9473         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9474                         sizeof(struct btrfs_path), 0,
9475                         SLAB_MEM_SPREAD, NULL);
9476         if (!btrfs_path_cachep)
9477                 goto fail;
9478
9479         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9480                         sizeof(struct btrfs_free_space), 0,
9481                         SLAB_MEM_SPREAD, NULL);
9482         if (!btrfs_free_space_cachep)
9483                 goto fail;
9484
9485         btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
9486                                                         PAGE_SIZE, PAGE_SIZE,
9487                                                         SLAB_MEM_SPREAD, NULL);
9488         if (!btrfs_free_space_bitmap_cachep)
9489                 goto fail;
9490
9491         return 0;
9492 fail:
9493         btrfs_destroy_cachep();
9494         return -ENOMEM;
9495 }
9496
9497 static int btrfs_getattr(const struct path *path, struct kstat *stat,
9498                          u32 request_mask, unsigned int flags)
9499 {
9500         u64 delalloc_bytes;
9501         struct inode *inode = d_inode(path->dentry);
9502         u32 blocksize = inode->i_sb->s_blocksize;
9503         u32 bi_flags = BTRFS_I(inode)->flags;
9504
9505         stat->result_mask |= STATX_BTIME;
9506         stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
9507         stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
9508         if (bi_flags & BTRFS_INODE_APPEND)
9509                 stat->attributes |= STATX_ATTR_APPEND;
9510         if (bi_flags & BTRFS_INODE_COMPRESS)
9511                 stat->attributes |= STATX_ATTR_COMPRESSED;
9512         if (bi_flags & BTRFS_INODE_IMMUTABLE)
9513                 stat->attributes |= STATX_ATTR_IMMUTABLE;
9514         if (bi_flags & BTRFS_INODE_NODUMP)
9515                 stat->attributes |= STATX_ATTR_NODUMP;
9516
9517         stat->attributes_mask |= (STATX_ATTR_APPEND |
9518                                   STATX_ATTR_COMPRESSED |
9519                                   STATX_ATTR_IMMUTABLE |
9520                                   STATX_ATTR_NODUMP);
9521
9522         generic_fillattr(inode, stat);
9523         stat->dev = BTRFS_I(inode)->root->anon_dev;
9524
9525         spin_lock(&BTRFS_I(inode)->lock);
9526         delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
9527         spin_unlock(&BTRFS_I(inode)->lock);
9528         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9529                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9530         return 0;
9531 }
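/*
 * Userspace reads the attributes filled in above through statx(2); for
 * example the birth time (i_otime) is only visible via STATX_BTIME.  A
 * minimal sketch, illustrative only ("somefile" is a placeholder, error
 * handling omitted, needs glibc >= 2.28 for the statx() wrapper):
 *
 *	#define _GNU_SOURCE
 *	#include <sys/stat.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct statx stx;
 *
 *		statx(AT_FDCWD, "somefile", 0, STATX_BTIME, &stx);
 *		if (stx.stx_mask & STATX_BTIME)
 *			printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
 *		return 0;
 *	}
 */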
9532
9533 static int btrfs_rename_exchange(struct inode *old_dir,
9534                               struct dentry *old_dentry,
9535                               struct inode *new_dir,
9536                               struct dentry *new_dentry)
9537 {
9538         struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9539         struct btrfs_trans_handle *trans;
9540         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9541         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9542         struct inode *new_inode = new_dentry->d_inode;
9543         struct inode *old_inode = old_dentry->d_inode;
9544         struct timespec64 ctime = current_time(old_inode);
9545         struct dentry *parent;
9546         u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9547         u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
9548         u64 old_idx = 0;
9549         u64 new_idx = 0;
9550         int ret;
9551         bool root_log_pinned = false;
9552         bool dest_log_pinned = false;
9553         struct btrfs_log_ctx ctx_root;
9554         struct btrfs_log_ctx ctx_dest;
9555         bool sync_log_root = false;
9556         bool sync_log_dest = false;
9557         bool commit_transaction = false;
9558
9559         /*
9560          * For non-subvolumes, allow exchange only within one subvolume, in the
9561          * same inode namespace. Two subvolumes (represented as directories) can
9562          * be exchanged as they're a logical link and have a fixed inode number.
9563          */
9564         if (root != dest &&
9565             (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
9566              new_ino != BTRFS_FIRST_FREE_OBJECTID))
9567                 return -EXDEV;
9568
9569         btrfs_init_log_ctx(&ctx_root, old_inode);
9570         btrfs_init_log_ctx(&ctx_dest, new_inode);
9571
9572         /* close the race window with snapshot create/destroy ioctl */
9573         if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
9574             new_ino == BTRFS_FIRST_FREE_OBJECTID)
9575                 down_read(&fs_info->subvol_sem);
9576
9577         /*
9578          * We want to reserve the absolute worst case amount of items.  So if
9579          * both inodes are subvols and we need to unlink them then that would
9580          * require 4 item modifications, but if they are both normal inodes it
9581          * would require 5 item modifications, so we'll assume they're normal
9582          * inodes.  So 5 * 2 is 10, plus 2 for the new links, so 12 total items
9583          * should cover the worst case number of items we'll modify.
9584          */
9585         trans = btrfs_start_transaction(root, 12);
9586         if (IS_ERR(trans)) {
9587                 ret = PTR_ERR(trans);
9588                 goto out_notrans;
9589         }
9590
9591         if (dest != root)
9592                 btrfs_record_root_in_trans(trans, dest);
9593
9594         /*
9595          * We need to find a free sequence number both in the source and
9596          * in the destination directory for the exchange.
9597          */
9598         ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
9599         if (ret)
9600                 goto out_fail;
9601         ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
9602         if (ret)
9603                 goto out_fail;
9604
9605         BTRFS_I(old_inode)->dir_index = 0ULL;
9606         BTRFS_I(new_inode)->dir_index = 0ULL;
9607
9608         /* Reference for the source. */
9609         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9610                 /* force full log commit if subvolume involved. */
9611                 btrfs_set_log_full_commit(fs_info, trans);
9612         } else {
9613                 btrfs_pin_log_trans(root);
9614                 root_log_pinned = true;
9615                 ret = btrfs_insert_inode_ref(trans, dest,
9616                                              new_dentry->d_name.name,
9617                                              new_dentry->d_name.len,
9618                                              old_ino,
9619                                              btrfs_ino(BTRFS_I(new_dir)),
9620                                              old_idx);
9621                 if (ret)
9622                         goto out_fail;
9623         }
9624
9625         /* And now for the dest. */
9626         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9627                 /* force full log commit if subvolume involved. */
9628                 btrfs_set_log_full_commit(fs_info, trans);
9629         } else {
9630                 btrfs_pin_log_trans(dest);
9631                 dest_log_pinned = true;
9632                 ret = btrfs_insert_inode_ref(trans, root,
9633                                              old_dentry->d_name.name,
9634                                              old_dentry->d_name.len,
9635                                              new_ino,
9636                                              btrfs_ino(BTRFS_I(old_dir)),
9637                                              new_idx);
9638                 if (ret)
9639                         goto out_fail;
9640         }
9641
9642         /* Update inode version and ctime/mtime. */
9643         inode_inc_iversion(old_dir);
9644         inode_inc_iversion(new_dir);
9645         inode_inc_iversion(old_inode);
9646         inode_inc_iversion(new_inode);
9647         old_dir->i_ctime = old_dir->i_mtime = ctime;
9648         new_dir->i_ctime = new_dir->i_mtime = ctime;
9649         old_inode->i_ctime = ctime;
9650         new_inode->i_ctime = ctime;
9651
9652         if (old_dentry->d_parent != new_dentry->d_parent) {
9653                 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9654                                 BTRFS_I(old_inode), 1);
9655                 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
9656                                 BTRFS_I(new_inode), 1);
9657         }
9658
9659         /* src is a subvolume */
9660         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9661                 ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9662         } else { /* src is an inode */
9663                 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9664                                            BTRFS_I(old_dentry->d_inode),
9665                                            old_dentry->d_name.name,
9666                                            old_dentry->d_name.len);
9667                 if (!ret)
9668                         ret = btrfs_update_inode(trans, root, old_inode);
9669         }
9670         if (ret) {
9671                 btrfs_abort_transaction(trans, ret);
9672                 goto out_fail;
9673         }
9674
9675         /* dest is a subvolume */
9676         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9677                 ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9678         } else { /* dest is an inode */
9679                 ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9680                                            BTRFS_I(new_dentry->d_inode),
9681                                            new_dentry->d_name.name,
9682                                            new_dentry->d_name.len);
9683                 if (!ret)
9684                         ret = btrfs_update_inode(trans, dest, new_inode);
9685         }
9686         if (ret) {
9687                 btrfs_abort_transaction(trans, ret);
9688                 goto out_fail;
9689         }
9690
9691         ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9692                              new_dentry->d_name.name,
9693                              new_dentry->d_name.len, 0, old_idx);
9694         if (ret) {
9695                 btrfs_abort_transaction(trans, ret);
9696                 goto out_fail;
9697         }
9698
9699         ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9700                              old_dentry->d_name.name,
9701                              old_dentry->d_name.len, 0, new_idx);
9702         if (ret) {
9703                 btrfs_abort_transaction(trans, ret);
9704                 goto out_fail;
9705         }
9706
9707         if (old_inode->i_nlink == 1)
9708                 BTRFS_I(old_inode)->dir_index = old_idx;
9709         if (new_inode->i_nlink == 1)
9710                 BTRFS_I(new_inode)->dir_index = new_idx;
9711
9712         if (root_log_pinned) {
9713                 parent = new_dentry->d_parent;
9714                 ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
9715                                          BTRFS_I(old_dir), parent,
9716                                          false, &ctx_root);
9717                 if (ret == BTRFS_NEED_LOG_SYNC)
9718                         sync_log_root = true;
9719                 else if (ret == BTRFS_NEED_TRANS_COMMIT)
9720                         commit_transaction = true;
9721                 ret = 0;
9722                 btrfs_end_log_trans(root);
9723                 root_log_pinned = false;
9724         }
9725         if (dest_log_pinned) {
9726                 if (!commit_transaction) {
9727                         parent = old_dentry->d_parent;
9728                         ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),
9729                                                  BTRFS_I(new_dir), parent,
9730                                                  false, &ctx_dest);
9731                         if (ret == BTRFS_NEED_LOG_SYNC)
9732                                 sync_log_dest = true;
9733                         else if (ret == BTRFS_NEED_TRANS_COMMIT)
9734                                 commit_transaction = true;
9735                         ret = 0;
9736                 }
9737                 btrfs_end_log_trans(dest);
9738                 dest_log_pinned = false;
9739         }
9740 out_fail:
9741         /*
9742          * If we have pinned a log and an error happened, we unpin tasks
9743          * trying to sync the log and force them to fall back to a transaction
9744          * commit if the log currently contains any of the inodes involved in
9745          * this rename operation (to ensure we do not persist a log with an
9746          * inconsistent state for any of these inodes, which could lead to
9747          * inconsistencies when replayed). If the transaction was aborted, the
9748          * abort reason is propagated to userspace when attempting to commit
9749          * the transaction. If the log does not contain any of these inodes, we
9750          * allow the tasks to sync it.
9751          */
9752         if (ret && (root_log_pinned || dest_log_pinned)) {
9753                 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9754                     btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9755                     btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9756                     (new_inode &&
9757                      btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9758                         btrfs_set_log_full_commit(fs_info, trans);
9759
9760                 if (root_log_pinned) {
9761                         btrfs_end_log_trans(root);
9762                         root_log_pinned = false;
9763                 }
9764                 if (dest_log_pinned) {
9765                         btrfs_end_log_trans(dest);
9766                         dest_log_pinned = false;
9767                 }
9768         }
9769         if (!ret && sync_log_root && !commit_transaction) {
9770                 ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,
9771                                      &ctx_root);
9772                 if (ret)
9773                         commit_transaction = true;
9774         }
9775         if (!ret && sync_log_dest && !commit_transaction) {
9776                 ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,
9777                                      &ctx_dest);
9778                 if (ret)
9779                         commit_transaction = true;
9780         }
9781         if (commit_transaction) {
9782                 /*
9783                  * We may have set commit_transaction when logging the new name
9784                  * in the destination root, in which case we left the source
9785          * root context in the list of log contexts. So make sure we
9786                  * remove it to avoid invalid memory accesses, since the context
9787                  * was allocated in our stack frame.
9788                  */
9789                 if (sync_log_root) {
9790                         mutex_lock(&root->log_mutex);
9791                         list_del_init(&ctx_root.list);
9792                         mutex_unlock(&root->log_mutex);
9793                 }
9794                 ret = btrfs_commit_transaction(trans);
9795         } else {
9796                 int ret2;
9797
9798                 ret2 = btrfs_end_transaction(trans);
9799                 ret = ret ? ret : ret2;
9800         }
9801 out_notrans:
9802         if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
9803             old_ino == BTRFS_FIRST_FREE_OBJECTID)
9804                 up_read(&fs_info->subvol_sem);
9805
9806         ASSERT(list_empty(&ctx_root.list));
9807         ASSERT(list_empty(&ctx_dest.list));
9808
9809         return ret;
9810 }
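/*
 * This path is reached via renameat2(2) with RENAME_EXCHANGE, which
 * atomically swaps two names. A minimal sketch, illustrative only ("a" and
 * "b" are placeholders); as enforced above, on btrfs both names must live
 * in the same subvolume unless both are subvolume roots, otherwise -EXDEV
 * is returned:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/fs.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// glibc only gained a renameat2() wrapper in 2.28, so use
 *		// the raw syscall here.
 *		if (syscall(SYS_renameat2, AT_FDCWD, "a", AT_FDCWD, "b",
 *			    RENAME_EXCHANGE)) {
 *			perror("renameat2");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */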
9811
9812 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
9813                                      struct btrfs_root *root,
9814                                      struct inode *dir,
9815                                      struct dentry *dentry)
9816 {
9817         int ret;
9818         struct inode *inode;
9819         u64 objectid;
9820         u64 index;
9821
9822         ret = btrfs_find_free_ino(root, &objectid);
9823         if (ret)
9824                 return ret;
9825
9826         inode = btrfs_new_inode(trans, root, dir,
9827                                 dentry->d_name.name,
9828                                 dentry->d_name.len,
9829                                 btrfs_ino(BTRFS_I(dir)),
9830                                 objectid,
9831                                 S_IFCHR | WHITEOUT_MODE,
9832                                 &index);
9833
9834         if (IS_ERR(inode)) {
9835                 ret = PTR_ERR(inode);
9836                 return ret;
9837         }
9838
9839         inode->i_op = &btrfs_special_inode_operations;
9840         init_special_inode(inode, inode->i_mode,
9841                 WHITEOUT_DEV);
9842
9843         ret = btrfs_init_inode_security(trans, inode, dir,
9844                                 &dentry->d_name);
9845         if (ret)
9846                 goto out;
9847
9848         ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
9849                                 BTRFS_I(inode), 0, index);
9850         if (ret)
9851                 goto out;
9852
9853         ret = btrfs_update_inode(trans, root, inode);
9854 out:
9855         unlock_new_inode(inode);
9856         if (ret)
9857                 inode_dec_link_count(inode);
9858         iput(inode);
9859
9860         return ret;
9861 }
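/*
 * Whiteouts exist for overlayfs: when a name on a lower layer is renamed
 * on the writable upper layer, the old name must be masked by a whiteout
 * (a char device node with device number 0, see WHITEOUT_DEV) so that the
 * lower file does not show through. The helper above creates exactly that
 * object in place of the old name when RENAME_WHITEOUT is used.
 */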
9862
9863 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9864                            struct inode *new_dir, struct dentry *new_dentry,
9865                            unsigned int flags)
9866 {
9867         struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9868         struct btrfs_trans_handle *trans;
9869         unsigned int trans_num_items;
9870         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9871         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9872         struct inode *new_inode = d_inode(new_dentry);
9873         struct inode *old_inode = d_inode(old_dentry);
9874         u64 index = 0;
9875         int ret;
9876         u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9877         bool log_pinned = false;
9878         struct btrfs_log_ctx ctx;
9879         bool sync_log = false;
9880         bool commit_transaction = false;
9881
9882         if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9883                 return -EPERM;
9884
9885         /* we only allow rename subvolume link between subvolumes */
9886         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9887                 return -EXDEV;
9888
9889         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9890             (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9891                 return -ENOTEMPTY;
9892
9893         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9894             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9895                 return -ENOTEMPTY;
9896
9898         /* check for collisions, even if the name isn't there */
9899         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9900                              new_dentry->d_name.name,
9901                              new_dentry->d_name.len);
9902
9903         if (ret) {
9904                 if (ret == -EEXIST) {
9905                         /* we shouldn't get -EEXIST without a new_inode */
9906                         if (WARN_ON(!new_inode))
9907                                 return ret;
9908                 } else {
9909                         /* maybe -EOVERFLOW */
9910                         return ret;
9911                 }
9912         }
9915         ret = 0;
9916
9917         /*
9918          * we're using rename to replace one file with another.  Start IO on it
9919          * now so we don't add too much work to the end of the transaction
9920          */
9921         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9922                 filemap_flush(old_inode->i_mapping);
9923
9924         /* close the race window with snapshot create/destroy ioctl */
9925         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9926                 down_read(&fs_info->subvol_sem);
9927         /*
9928          * We want to reserve the absolute worst case amount of items.  So if
9929          * both inodes are subvols and we need to unlink them then that would
9930          * require 4 item modifications, but if they are both normal inodes it
9931          * would require 5 item modifications, so we'll assume they are normal
9932          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9933          * should cover the worst case number of items we'll modify.
9934          * If our rename has the whiteout flag, we need 5 more units for the
9935          * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
9936          * when selinux is enabled).
9937          */
9938         trans_num_items = 11;
9939         if (flags & RENAME_WHITEOUT)
9940                 trans_num_items += 5;
9941         trans = btrfs_start_transaction(root, trans_num_items);
9942         if (IS_ERR(trans)) {
9943                 ret = PTR_ERR(trans);
9944                 goto out_notrans;
9945         }
9946
9947         if (dest != root)
9948                 btrfs_record_root_in_trans(trans, dest);
9949
9950         ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9951         if (ret)
9952                 goto out_fail;
9953
9954         BTRFS_I(old_inode)->dir_index = 0ULL;
9955         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9956                 /* force full log commit if subvolume involved. */
9957                 btrfs_set_log_full_commit(fs_info, trans);
9958         } else {
9959                 btrfs_pin_log_trans(root);
9960                 log_pinned = true;
9961                 ret = btrfs_insert_inode_ref(trans, dest,
9962                                              new_dentry->d_name.name,
9963                                              new_dentry->d_name.len,
9964                                              old_ino,
9965                                              btrfs_ino(BTRFS_I(new_dir)), index);
9966                 if (ret)
9967                         goto out_fail;
9968         }
9969
9970         inode_inc_iversion(old_dir);
9971         inode_inc_iversion(new_dir);
9972         inode_inc_iversion(old_inode);
9973         old_dir->i_ctime = old_dir->i_mtime =
9974         new_dir->i_ctime = new_dir->i_mtime =
9975         old_inode->i_ctime = current_time(old_dir);
9976
9977         if (old_dentry->d_parent != new_dentry->d_parent)
9978                 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9979                                 BTRFS_I(old_inode), 1);
9980
9981         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9982                 ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9983         } else {
9984                 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9985                                         BTRFS_I(d_inode(old_dentry)),
9986                                         old_dentry->d_name.name,
9987                                         old_dentry->d_name.len);
9988                 if (!ret)
9989                         ret = btrfs_update_inode(trans, root, old_inode);
9990         }
9991         if (ret) {
9992                 btrfs_abort_transaction(trans, ret);
9993                 goto out_fail;
9994         }
9995
9996         if (new_inode) {
9997                 inode_inc_iversion(new_inode);
9998                 new_inode->i_ctime = current_time(new_inode);
9999                 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
10000                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
10001                         ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
10002                         BUG_ON(new_inode->i_nlink == 0);
10003                 } else {
10004                         ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
10005                                                  BTRFS_I(d_inode(new_dentry)),
10006                                                  new_dentry->d_name.name,
10007                                                  new_dentry->d_name.len);
10008                 }
10009                 if (!ret && new_inode->i_nlink == 0)
10010                         ret = btrfs_orphan_add(trans,
10011                                         BTRFS_I(d_inode(new_dentry)));
10012                 if (ret) {
10013                         btrfs_abort_transaction(trans, ret);
10014                         goto out_fail;
10015                 }
10016         }
10017
10018         ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
10019                              new_dentry->d_name.name,
10020                              new_dentry->d_name.len, 0, index);
10021         if (ret) {
10022                 btrfs_abort_transaction(trans, ret);
10023                 goto out_fail;
10024         }
10025
10026         if (old_inode->i_nlink == 1)
10027                 BTRFS_I(old_inode)->dir_index = index;
10028
10029         if (log_pinned) {
10030                 struct dentry *parent = new_dentry->d_parent;
10031
10032                 btrfs_init_log_ctx(&ctx, old_inode);
10033                 ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
10034                                          BTRFS_I(old_dir), parent,
10035                                          false, &ctx);
10036                 if (ret == BTRFS_NEED_LOG_SYNC)
10037                         sync_log = true;
10038                 else if (ret == BTRFS_NEED_TRANS_COMMIT)
10039                         commit_transaction = true;
10040                 ret = 0;
10041                 btrfs_end_log_trans(root);
10042                 log_pinned = false;
10043         }
10044
10045         if (flags & RENAME_WHITEOUT) {
10046                 ret = btrfs_whiteout_for_rename(trans, root, old_dir,
10047                                                 old_dentry);
10048
10049                 if (ret) {
10050                         btrfs_abort_transaction(trans, ret);
10051                         goto out_fail;
10052                 }
10053         }
10054 out_fail:
10055         /*
10056          * If we have pinned the log and an error happened, we unpin tasks
10057          * trying to sync the log and force them to fall back to a transaction
10058          * commit if the log currently contains any of the inodes involved in
10059          * this rename operation (to ensure we do not persist a log with an
10060          * inconsistent state for any of these inodes, which could lead to
10061          * inconsistencies when replayed). If the transaction was aborted, the
10062          * abort reason is propagated to userspace when attempting to commit
10063          * the transaction. If the log does not contain any of these inodes, we
10064          * allow the tasks to sync it.
10065          */
10066         if (ret && log_pinned) {
10067                 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
10068                     btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
10069                     btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
10070                     (new_inode &&
10071                      btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
10072                         btrfs_set_log_full_commit(fs_info, trans);
10073
10074                 btrfs_end_log_trans(root);
10075                 log_pinned = false;
10076         }
10077         if (!ret && sync_log) {
10078                 ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
10079                 if (ret)
10080                         commit_transaction = true;
10081         } else if (sync_log) {
10082                 mutex_lock(&root->log_mutex);
10083                 list_del(&ctx.list);
10084                 mutex_unlock(&root->log_mutex);
10085         }
10086         if (commit_transaction) {
10087                 ret = btrfs_commit_transaction(trans);
10088         } else {
10089                 int ret2;
10090
10091                 ret2 = btrfs_end_transaction(trans);
10092                 ret = ret ? ret : ret2;
10093         }
10094 out_notrans:
10095         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
10096                 up_read(&fs_info->subvol_sem);
10097
10098         return ret;
10099 }
10100
10101 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
10102                          struct inode *new_dir, struct dentry *new_dentry,
10103                          unsigned int flags)
10104 {
10105         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
10106                 return -EINVAL;
10107
10108         if (flags & RENAME_EXCHANGE)
10109                 return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
10110                                           new_dentry);
10111
10112         return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
10113 }
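/*
 * Note that RENAME_NOREPLACE needs no handling here: the VFS already
 * returns -EEXIST in do_renameat2() when the target name exists, so by the
 * time ->rename is called the flag only has to be accepted.
 */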
10114
10115 struct btrfs_delalloc_work {
10116         struct inode *inode;
10117         struct completion completion;
10118         struct list_head list;
10119         struct btrfs_work work;
10120 };
10121
10122 static void btrfs_run_delalloc_work(struct btrfs_work *work)
10123 {
10124         struct btrfs_delalloc_work *delalloc_work;
10125         struct inode *inode;
10126
10127         delalloc_work = container_of(work, struct btrfs_delalloc_work,
10128                                      work);
10129         inode = delalloc_work->inode;
10130         filemap_flush(inode->i_mapping);
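        /*
         * Compressed writes go through async extents and may redirty
         * pages behind the first flush, so flush a second time to make
         * sure all of the delalloc work has actually been started.
         */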
10131         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
10132                                 &BTRFS_I(inode)->runtime_flags))
10133                 filemap_flush(inode->i_mapping);
10134
10135         iput(inode);
10136         complete(&delalloc_work->completion);
10137 }
10138
10139 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
10140 {
10141         struct btrfs_delalloc_work *work;
10142
10143         work = kmalloc(sizeof(*work), GFP_NOFS);
10144         if (!work)
10145                 return NULL;
10146
10147         init_completion(&work->completion);
10148         INIT_LIST_HEAD(&work->list);
10149         work->inode = inode;
10150         WARN_ON_ONCE(!inode);
10151         btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
10152                         btrfs_run_delalloc_work, NULL, NULL);
10153
10154         return work;
10155 }
10156
10157 /*
10158  * Some fairly slow code that needs optimization. This walks the list
10159  * of all the inodes with pending delalloc and forces them to disk.
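 *
 * Returns the number of inodes queued for flushing, or a negative errno.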
10160  */
10161 static int start_delalloc_inodes(struct btrfs_root *root, int nr, bool snapshot)
10162 {
10163         struct btrfs_inode *binode;
10164         struct inode *inode;
10165         struct btrfs_delalloc_work *work, *next;
10166         struct list_head works;
10167         struct list_head splice;
10168         int ret = 0;
10169
10170         INIT_LIST_HEAD(&works);
10171         INIT_LIST_HEAD(&splice);
10172
10173         mutex_lock(&root->delalloc_mutex);
10174         spin_lock(&root->delalloc_lock);
10175         list_splice_init(&root->delalloc_inodes, &splice);
10176         while (!list_empty(&splice)) {
10177                 binode = list_entry(splice.next, struct btrfs_inode,
10178                                     delalloc_inodes);
10179
10180                 list_move_tail(&binode->delalloc_inodes,
10181                                &root->delalloc_inodes);
10182                 inode = igrab(&binode->vfs_inode);
10183                 if (!inode) {
10184                         cond_resched_lock(&root->delalloc_lock);
10185                         continue;
10186                 }
10187                 spin_unlock(&root->delalloc_lock);
10188
10189                 if (snapshot)
10190                         set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
10191                                 &binode->runtime_flags);
10192                 work = btrfs_alloc_delalloc_work(inode);
10193                 if (!work) {
10194                         iput(inode);
10195                         ret = -ENOMEM;
10196                         goto out;
10197                 }
10198                 list_add_tail(&work->list, &works);
10199                 btrfs_queue_work(root->fs_info->flush_workers,
10200                                  &work->work);
10201                 ret++;
10202                 if (nr != -1 && ret >= nr)
10203                         goto out;
10204                 cond_resched();
10205                 spin_lock(&root->delalloc_lock);
10206         }
10207         spin_unlock(&root->delalloc_lock);
10208
10209 out:
10210         list_for_each_entry_safe(work, next, &works, list) {
10211                 list_del_init(&work->list);
10212                 wait_for_completion(&work->completion);
10213                 kfree(work);
10214         }
10215
10216         if (!list_empty(&splice)) {
10217                 spin_lock(&root->delalloc_lock);
10218                 list_splice_tail(&splice, &root->delalloc_inodes);
10219                 spin_unlock(&root->delalloc_lock);
10220         }
10221         mutex_unlock(&root->delalloc_mutex);
10222         return ret;
10223 }
10224
10225 int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
10226 {
10227         struct btrfs_fs_info *fs_info = root->fs_info;
10228         int ret;
10229
10230         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10231                 return -EROFS;
10232
10233         ret = start_delalloc_inodes(root, -1, true);
10234         if (ret > 0)
10235                 ret = 0;
10236         return ret;
10237 }
10238
10239 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr)
10240 {
10241         struct btrfs_root *root;
10242         struct list_head splice;
10243         int ret;
10244
10245         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10246                 return -EROFS;
10247
10248         INIT_LIST_HEAD(&splice);
10249
10250         mutex_lock(&fs_info->delalloc_root_mutex);
10251         spin_lock(&fs_info->delalloc_root_lock);
10252         list_splice_init(&fs_info->delalloc_roots, &splice);
10253         while (!list_empty(&splice) && nr) {
10254                 root = list_first_entry(&splice, struct btrfs_root,
10255                                         delalloc_root);
10256                 root = btrfs_grab_fs_root(root);
10257                 BUG_ON(!root);
10258                 list_move_tail(&root->delalloc_root,
10259                                &fs_info->delalloc_roots);
10260                 spin_unlock(&fs_info->delalloc_root_lock);
10261
10262                 ret = start_delalloc_inodes(root, nr, false);
10263                 btrfs_put_fs_root(root);
10264                 if (ret < 0)
10265                         goto out;
10266
10267                 if (nr != -1) {
10268                         nr -= ret;
10269                         WARN_ON(nr < 0);
10270                 }
10271                 spin_lock(&fs_info->delalloc_root_lock);
10272         }
10273         spin_unlock(&fs_info->delalloc_root_lock);
10274
10275         ret = 0;
10276 out:
10277         if (!list_empty(&splice)) {
10278                 spin_lock(&fs_info->delalloc_root_lock);
10279                 list_splice_tail(&splice, &fs_info->delalloc_roots);
10280                 spin_unlock(&fs_info->delalloc_root_lock);
10281         }
10282         mutex_unlock(&fs_info->delalloc_root_mutex);
10283         return ret;
10284 }
10285
10286 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
10287                          const char *symname)
10288 {
10289         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10290         struct btrfs_trans_handle *trans;
10291         struct btrfs_root *root = BTRFS_I(dir)->root;
10292         struct btrfs_path *path;
10293         struct btrfs_key key;
10294         struct inode *inode = NULL;
10295         int err;
10296         u64 objectid;
10297         u64 index = 0;
10298         int name_len;
10299         int datasize;
10300         unsigned long ptr;
10301         struct btrfs_file_extent_item *ei;
10302         struct extent_buffer *leaf;
10303
10304         name_len = strlen(symname);
10305         if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
10306                 return -ENAMETOOLONG;
10307
10308         /*
10309          * 2 items for inode item and ref
10310          * 2 items for dir items
10311          * 1 item for updating parent inode item
10312          * 1 item for the inline extent item
10313          * 1 item for xattr if selinux is on
10314          */
10315         trans = btrfs_start_transaction(root, 7);
10316         if (IS_ERR(trans))
10317                 return PTR_ERR(trans);
10318
10319         err = btrfs_find_free_ino(root, &objectid);
10320         if (err)
10321                 goto out_unlock;
10322
10323         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
10324                                 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
10325                                 objectid, S_IFLNK|S_IRWXUGO, &index);
10326         if (IS_ERR(inode)) {
10327                 err = PTR_ERR(inode);
10328                 inode = NULL;
10329                 goto out_unlock;
10330         }
10331
10332         /*
10333          * If the active LSM wants to access the inode during
10334          * d_instantiate it needs these. Smack checks to see
10335          * if the filesystem supports xattrs by looking at the
10336          * ops vector.
10337          */
10338         inode->i_fop = &btrfs_file_operations;
10339         inode->i_op = &btrfs_file_inode_operations;
10340         inode->i_mapping->a_ops = &btrfs_aops;
10341         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10342
10343         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
10344         if (err)
10345                 goto out_unlock;
10346
10347         path = btrfs_alloc_path();
10348         if (!path) {
10349                 err = -ENOMEM;
10350                 goto out_unlock;
10351         }
10352         key.objectid = btrfs_ino(BTRFS_I(inode));
10353         key.offset = 0;
10354         key.type = BTRFS_EXTENT_DATA_KEY;
10355         datasize = btrfs_file_extent_calc_inline_size(name_len);
10356         err = btrfs_insert_empty_item(trans, root, path, &key,
10357                                       datasize);
10358         if (err) {
10359                 btrfs_free_path(path);
10360                 goto out_unlock;
10361         }
10362         leaf = path->nodes[0];
10363         ei = btrfs_item_ptr(leaf, path->slots[0],
10364                             struct btrfs_file_extent_item);
10365         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
10366         btrfs_set_file_extent_type(leaf, ei,
10367                                    BTRFS_FILE_EXTENT_INLINE);
10368         btrfs_set_file_extent_encryption(leaf, ei, 0);
10369         btrfs_set_file_extent_compression(leaf, ei, 0);
10370         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
10371         btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
10372
10373         ptr = btrfs_file_extent_inline_start(ei);
10374         write_extent_buffer(leaf, symname, ptr, name_len);
10375         btrfs_mark_buffer_dirty(leaf);
10376         btrfs_free_path(path);
10377
10378         inode->i_op = &btrfs_symlink_inode_operations;
10379         inode_nohighmem(inode);
10380         inode->i_mapping->a_ops = &btrfs_symlink_aops;
10381         inode_set_bytes(inode, name_len);
10382         btrfs_i_size_write(BTRFS_I(inode), name_len);
10383         err = btrfs_update_inode(trans, root, inode);
10384         /*
10385          * Last step: add directory indexes for our symlink inode. Doing this
10386          * last avoids extra cleanup of these indexes if an error happens
10387          * elsewhere above.
10388          */
10389         if (!err)
10390                 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
10391                                 BTRFS_I(inode), 0, index);
10392         if (err)
10393                 goto out_unlock;
10394
10395         d_instantiate_new(dentry, inode);
10396
10397 out_unlock:
10398         btrfs_end_transaction(trans);
10399         if (err && inode) {
10400                 inode_dec_link_count(inode);
10401                 discard_new_inode(inode);
10402         }
10403         btrfs_btree_balance_dirty(fs_info);
10404         return err;
10405 }
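/*
 * Unlike filesystems that stash short symlink targets directly in the
 * inode, btrfs stores the target as an inline file extent (built above),
 * which is why readlink(2) is served through the regular address space
 * read path (btrfs_symlink_aops) instead of a fast symlink pointer.
 */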
10406
10407 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
10408                                        u64 start, u64 num_bytes, u64 min_size,
10409                                        loff_t actual_len, u64 *alloc_hint,
10410                                        struct btrfs_trans_handle *trans)
10411 {
10412         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
10413         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
10414         struct extent_map *em;
10415         struct btrfs_root *root = BTRFS_I(inode)->root;
10416         struct btrfs_key ins;
10417         u64 cur_offset = start;
10418         u64 clear_offset = start;
10419         u64 i_size;
10420         u64 cur_bytes;
10421         u64 last_alloc = (u64)-1;
10422         int ret = 0;
10423         bool own_trans = true;
10424         u64 end = start + num_bytes - 1;
10425
10426         if (trans)
10427                 own_trans = false;
10428         while (num_bytes > 0) {
10429                 if (own_trans) {
10430                         trans = btrfs_start_transaction(root, 3);
10431                         if (IS_ERR(trans)) {
10432                                 ret = PTR_ERR(trans);
10433                                 break;
10434                         }
10435                 }
10436
10437                 cur_bytes = min_t(u64, num_bytes, SZ_256M);
10438                 cur_bytes = max(cur_bytes, min_size);
10439                 /*
10440                  * If we are severely fragmented we could end up with really
10441                  * small allocations, so if the allocator is returning small
10442                  * chunks, let's make its job easier by only searching for
10443                  * chunks of that size.
10444                  */
10445                 cur_bytes = min(cur_bytes, last_alloc);
10446                 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
10447                                 min_size, 0, *alloc_hint, &ins, 1, 0);
10448                 if (ret) {
10449                         if (own_trans)
10450                                 btrfs_end_transaction(trans);
10451                         break;
10452                 }
10453
10454                 /*
10455                  * We've reserved this space, and thus converted it from
10456                  * ->bytes_may_use to ->bytes_reserved.  For any error that happens
10457                  * from here on out, we only need to clear our reservation
10458                  * for the remaining unreserved area, so advance our
10459                  * clear_offset by our extent size.
10460                  */
10461                 clear_offset += ins.offset;
10462                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10463
10464                 last_alloc = ins.offset;
10465                 ret = insert_reserved_file_extent(trans, inode,
10466                                                   cur_offset, ins.objectid,
10467                                                   ins.offset, ins.offset,
10468                                                   ins.offset, 0, 0, 0,
10469                                                   BTRFS_FILE_EXTENT_PREALLOC);
10470                 if (ret) {
10471                         btrfs_free_reserved_extent(fs_info, ins.objectid,
10472                                                    ins.offset, 0);
10473                         btrfs_abort_transaction(trans, ret);
10474                         if (own_trans)
10475                                 btrfs_end_transaction(trans);
10476                         break;
10477                 }
10478
10479                 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10480                                         cur_offset + ins.offset - 1, 0);
10481
10482                 em = alloc_extent_map();
10483                 if (!em) {
10484                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
10485                                 &BTRFS_I(inode)->runtime_flags);
10486                         goto next;
10487                 }
10488
10489                 em->start = cur_offset;
10490                 em->orig_start = cur_offset;
10491                 em->len = ins.offset;
10492                 em->block_start = ins.objectid;
10493                 em->block_len = ins.offset;
10494                 em->orig_block_len = ins.offset;
10495                 em->ram_bytes = ins.offset;
10496                 em->bdev = fs_info->fs_devices->latest_bdev;
10497                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
10498                 em->generation = trans->transid;
10499
10500                 while (1) {
10501                         write_lock(&em_tree->lock);
10502                         ret = add_extent_mapping(em_tree, em, 1);
10503                         write_unlock(&em_tree->lock);
10504                         if (ret != -EEXIST)
10505                                 break;
10506                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10507                                                 cur_offset + ins.offset - 1,
10508                                                 0);
10509                 }
10510                 free_extent_map(em);
10511 next:
10512                 num_bytes -= ins.offset;
10513                 cur_offset += ins.offset;
10514                 *alloc_hint = ins.objectid + ins.offset;
10515
10516                 inode_inc_iversion(inode);
10517                 inode->i_ctime = current_time(inode);
10518                 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
10519                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
10520                     (actual_len > inode->i_size) &&
10521                     (cur_offset > inode->i_size)) {
10522                         if (cur_offset > actual_len)
10523                                 i_size = actual_len;
10524                         else
10525                                 i_size = cur_offset;
10526                         i_size_write(inode, i_size);
10527                         btrfs_ordered_update_i_size(inode, i_size, NULL);
10528                 }
10529
10530                 ret = btrfs_update_inode(trans, root, inode);
10531
10532                 if (ret) {
10533                         btrfs_abort_transaction(trans, ret);
10534                         if (own_trans)
10535                                 btrfs_end_transaction(trans);
10536                         break;
10537                 }
10538
10539                 if (own_trans)
10540                         btrfs_end_transaction(trans);
10541         }
10542         if (clear_offset < end)
10543                 btrfs_free_reserved_data_space(inode, NULL, clear_offset,
10544                         end - clear_offset + 1);
10545         return ret;
10546 }
10547
10548 int btrfs_prealloc_file_range(struct inode *inode, int mode,
10549                               u64 start, u64 num_bytes, u64 min_size,
10550                               loff_t actual_len, u64 *alloc_hint)
10551 {
10552         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10553                                            min_size, actual_len, alloc_hint,
10554                                            NULL);
10555 }
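/*
 * This is the backend for preallocation via fallocate(2), among other
 * callers. A minimal userspace sketch, illustrative only ("somefile" is a
 * placeholder): reserve space without changing i_size, which leaves
 * PREALLOC extents like the ones created above:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("somefile", O_RDWR | O_CREAT, 0644);
 *
 *		if (fd < 0)
 *			return 1;
 *		// 1 MiB at offset 0; FALLOC_FL_KEEP_SIZE leaves i_size alone.
 *		if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20))
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */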
10556
10557 int btrfs_prealloc_file_range_trans(struct inode *inode,
10558                                     struct btrfs_trans_handle *trans, int mode,
10559                                     u64 start, u64 num_bytes, u64 min_size,
10560                                     loff_t actual_len, u64 *alloc_hint)
10561 {
10562         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10563                                            min_size, actual_len, alloc_hint, trans);
10564 }
10565
10566 static int btrfs_set_page_dirty(struct page *page)
10567 {
10568         return __set_page_dirty_nobuffers(page);
10569 }
10570
10571 static int btrfs_permission(struct inode *inode, int mask)
10572 {
10573         struct btrfs_root *root = BTRFS_I(inode)->root;
10574         umode_t mode = inode->i_mode;
10575
10576         if (mask & MAY_WRITE &&
10577             (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10578                 if (btrfs_root_readonly(root))
10579                         return -EROFS;
10580                 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10581                         return -EACCES;
10582         }
10583         return generic_permission(inode, mask);
10584 }
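/*
 * Example: opening a file for writing inside a subvolume that was made
 * read-only (e.g. "btrfs property set <subvol> ro true") fails here with
 * -EROFS before any write is attempted.
 */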
10585
10586 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
10587 {
10588         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10589         struct btrfs_trans_handle *trans;
10590         struct btrfs_root *root = BTRFS_I(dir)->root;
10591         struct inode *inode = NULL;
10592         u64 objectid;
10593         u64 index;
10594         int ret = 0;
10595
10596         /*
10597          * 5 units required for adding an orphan entry
10598          */
        trans = btrfs_start_transaction(root, 5);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        ret = btrfs_find_free_ino(root, &objectid);
        if (ret)
                goto out;

        inode = btrfs_new_inode(trans, root, dir, NULL, 0,
                        btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                inode = NULL;
                goto out;
        }

        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;

        inode->i_mapping->a_ops = &btrfs_aops;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

        ret = btrfs_init_inode_security(trans, inode, dir, NULL);
        if (ret)
                goto out;

        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                goto out;
        ret = btrfs_orphan_add(trans, BTRFS_I(inode));
        if (ret)
                goto out;

        /*
         * The link count was set to 0 in btrfs_new_inode(); set it to 1
         * here, otherwise the decrement in d_tmpfile() would take it
         * below zero and trigger a warning, via:
         *
         *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
         */
        set_nlink(inode, 1);
        d_tmpfile(dentry, inode);
        unlock_new_inode(inode);
        mark_inode_dirty(inode);
out:
        btrfs_end_transaction(trans);
        if (ret && inode)
                discard_new_inode(inode);
        btrfs_btree_balance_dirty(fs_info);
        return ret;
}
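
/*
 * Illustrative sketch (not part of the kernel build): btrfs_tmpfile()
 * backs the O_TMPFILE open flag.  The new inode has no directory entry,
 * so it is tracked as an orphan and reclaimed on the last close unless
 * userspace gives it a name first with linkat().  The mount point is
 * hypothetical; linkat() with AT_EMPTY_PATH needs CAP_DAC_READ_SEARCH.
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              // Unnamed file in /mnt/btrfs; it vanishes on close
 *              // unless linked into the namespace below.
 *              int fd = open("/mnt/btrfs", O_TMPFILE | O_RDWR, 0600);
 *
 *              if (fd < 0)
 *                      return 1;
 *              write(fd, "scratch", 7);
 *              linkat(fd, "", AT_FDCWD, "/mnt/btrfs/named", AT_EMPTY_PATH);
 *              close(fd);
 *              return 0;
 *      }
 */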

__attribute__((const))
static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
{
        return -EAGAIN;
}

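/*
 * Sanity check for extent io tree ranges.  Ranges are inclusive, so a
 * well-formed range ends one byte before a sector boundary, i.e. at an
 * odd offset.  An even end offset (beyond the first page and not at
 * i_size - 1) is suspicious, so log it, ratelimited.
 */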
static void btrfs_check_extent_io_range(void *private_data, const char *caller,
                                        u64 start, u64 end)
{
        struct inode *inode = private_data;
        u64 isize;

        isize = i_size_read(inode);
        if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
                btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
                    "%s: ino %llu isize %llu odd range [%llu,%llu]",
                        caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
        }
}

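/*
 * Mark every page cache page covering the inclusive range [start, end]
 * as under writeback.  Every such page is expected to still be present
 * in the page cache, hence the ASSERT below.
 */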
void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        struct inode *inode = tree->private_data;
        unsigned long index = start >> PAGE_SHIFT;
        unsigned long end_index = end >> PAGE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(inode->i_mapping, index);
                ASSERT(page); /* Pages should be in the extent_io_tree */
                set_page_writeback(page);
                put_page(page);
                index++;
        }
}

static const struct inode_operations btrfs_dir_inode_operations = {
        .getattr        = btrfs_getattr,
        .lookup         = btrfs_lookup,
        .create         = btrfs_create,
        .unlink         = btrfs_unlink,
        .link           = btrfs_link,
        .mkdir          = btrfs_mkdir,
        .rmdir          = btrfs_rmdir,
        .rename         = btrfs_rename2,
        .symlink        = btrfs_symlink,
        .setattr        = btrfs_setattr,
        .mknod          = btrfs_mknod,
        .listxattr      = btrfs_listxattr,
        .permission     = btrfs_permission,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
        .tmpfile        = btrfs_tmpfile,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
        .lookup         = btrfs_lookup,
        .permission     = btrfs_permission,
        .update_time    = btrfs_update_time,
};

static const struct file_operations btrfs_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate_shared = btrfs_real_readdir,
        .open           = btrfs_opendir,
        .unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_compat_ioctl,
#endif
        .release        = btrfs_release_file,
        .fsync          = btrfs_sync_file,
};

static const struct extent_io_ops btrfs_extent_io_ops = {
        /* mandatory callbacks */
        .submit_bio_hook = btrfs_submit_bio_hook,
        .readpage_end_io_hook = btrfs_readpage_end_io_hook,
        .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,

        /* optional callbacks */
        .writepage_end_io_hook = btrfs_writepage_end_io_hook,
        .writepage_start_hook = btrfs_writepage_start_hook,
        .set_bit_hook = btrfs_set_bit_hook,
        .clear_bit_hook = btrfs_clear_bit_hook,
        .merge_extent_hook = btrfs_merge_extent_hook,
        .split_extent_hook = btrfs_split_extent_hook,
        .check_extent_io_range = btrfs_check_extent_io_range,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles use bmap
 * to build a map of the extents in a file.  They assume these extents
 * won't change over the life of the file, and they use the bmap result
 * to do IO directly against the underlying device.
 *
 * A btrfs bmap call would return logical addresses that aren't suitable
 * for IO, and those addresses also change frequently as COW operations
 * happen.  So, swapfile + btrfs == corruption.
 *
 * For now we avoid this by not implementing bmap at all.
 */
static const struct address_space_operations btrfs_aops = {
        .readpage       = btrfs_readpage,
        .writepage      = btrfs_writepage,
        .writepages     = btrfs_writepages,
        .readpages      = btrfs_readpages,
        .direct_IO      = btrfs_direct_IO,
        .invalidatepage = btrfs_invalidatepage,
        .releasepage    = btrfs_releasepage,
        .set_page_dirty = btrfs_set_page_dirty,
        .error_remove_page = generic_error_remove_page,
};
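
/*
 * Illustrative sketch (not part of the kernel build): the legacy FIBMAP
 * ioctl is the userspace window into ->bmap.  Since btrfs_aops above
 * provides no .bmap callback, the ioctl fails with EINVAL on btrfs.
 * Run it as root, since FIBMAP itself requires CAP_SYS_RAWIO; the path
 * is hypothetical.
 *
 *      #include <errno.h>
 *      #include <fcntl.h>
 *      #include <linux/fs.h>
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *
 *      int main(void)
 *      {
 *              int block = 0;  // logical block in, physical block out
 *              int fd = open("/mnt/btrfs/file", O_RDONLY);
 *
 *              if (fd < 0)
 *                      return 1;
 *              if (ioctl(fd, FIBMAP, &block) < 0)
 *                      printf("FIBMAP: %s\n", strerror(errno)); // EINVAL
 *              return 0;
 *      }
 */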

static const struct address_space_operations btrfs_symlink_aops = {
        .readpage       = btrfs_readpage,
        .writepage      = btrfs_writepage,
        .invalidatepage = btrfs_invalidatepage,
        .releasepage    = btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
        .listxattr      = btrfs_listxattr,
        .permission     = btrfs_permission,
        .fiemap         = btrfs_fiemap,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
};
static const struct inode_operations btrfs_special_inode_operations = {
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .listxattr      = btrfs_listxattr,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
        .get_link       = page_get_link,
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .listxattr      = btrfs_listxattr,
        .update_time    = btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
        .d_delete       = btrfs_dentry_delete,
};