1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/sched.h>
7 #include <linux/sched/mm.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/buffer_head.h>
11 #include <linux/blkdev.h>
12 #include <linux/ratelimit.h>
13 #include <linux/kthread.h>
14 #include <linux/raid/pq.h>
15 #include <linux/semaphore.h>
16 #include <linux/uuid.h>
17 #include <linux/list_sort.h>
18 #include "ctree.h"
19 #include "extent_map.h"
20 #include "disk-io.h"
21 #include "transaction.h"
22 #include "print-tree.h"
23 #include "volumes.h"
24 #include "raid56.h"
25 #include "async-thread.h"
26 #include "check-integrity.h"
27 #include "rcu-string.h"
28 #include "math.h"
29 #include "dev-replace.h"
30 #include "sysfs.h"
31 #include "tree-checker.h"
32
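/*
 * Table of per-profile parameters, indexed by enum btrfs_raid_types.  Each
 * entry describes the constraints of one block group profile, e.g. RAID1
 * keeps ncopies == 2 on at least devs_min == 2 devices and tolerates one
 * device failure, while RAID6 tolerates two.
 */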
33 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
34         [BTRFS_RAID_RAID10] = {
35                 .sub_stripes    = 2,
36                 .dev_stripes    = 1,
37                 .devs_max       = 0,    /* 0 == as many as possible */
38                 .devs_min       = 4,
39                 .tolerated_failures = 1,
40                 .devs_increment = 2,
41                 .ncopies        = 2,
42                 .raid_name      = "raid10",
43                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID10,
44                 .mindev_error   = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
45         },
46         [BTRFS_RAID_RAID1] = {
47                 .sub_stripes    = 1,
48                 .dev_stripes    = 1,
49                 .devs_max       = 2,
50                 .devs_min       = 2,
51                 .tolerated_failures = 1,
52                 .devs_increment = 2,
53                 .ncopies        = 2,
54                 .raid_name      = "raid1",
55                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1,
56                 .mindev_error   = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
57         },
58         [BTRFS_RAID_DUP] = {
59                 .sub_stripes    = 1,
60                 .dev_stripes    = 2,
61                 .devs_max       = 1,
62                 .devs_min       = 1,
63                 .tolerated_failures = 0,
64                 .devs_increment = 1,
65                 .ncopies        = 2,
66                 .raid_name      = "dup",
67                 .bg_flag        = BTRFS_BLOCK_GROUP_DUP,
68                 .mindev_error   = 0,
69         },
70         [BTRFS_RAID_RAID0] = {
71                 .sub_stripes    = 1,
72                 .dev_stripes    = 1,
73                 .devs_max       = 0,
74                 .devs_min       = 2,
75                 .tolerated_failures = 0,
76                 .devs_increment = 1,
77                 .ncopies        = 1,
78                 .raid_name      = "raid0",
79                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID0,
80                 .mindev_error   = 0,
81         },
82         [BTRFS_RAID_SINGLE] = {
83                 .sub_stripes    = 1,
84                 .dev_stripes    = 1,
85                 .devs_max       = 1,
86                 .devs_min       = 1,
87                 .tolerated_failures = 0,
88                 .devs_increment = 1,
89                 .ncopies        = 1,
90                 .raid_name      = "single",
91                 .bg_flag        = 0,
92                 .mindev_error   = 0,
93         },
94         [BTRFS_RAID_RAID5] = {
95                 .sub_stripes    = 1,
96                 .dev_stripes    = 1,
97                 .devs_max       = 0,
98                 .devs_min       = 2,
99                 .tolerated_failures = 1,
100                 .devs_increment = 1,
101                 .ncopies        = 1,
102                 .raid_name      = "raid5",
103                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID5,
104                 .mindev_error   = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
105         },
106         [BTRFS_RAID_RAID6] = {
107                 .sub_stripes    = 1,
108                 .dev_stripes    = 1,
109                 .devs_max       = 0,
110                 .devs_min       = 3,
111                 .tolerated_failures = 2,
112                 .devs_increment = 1,
113                 .ncopies        = 1,
114                 .raid_name      = "raid6",
115                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID6,
116                 .mindev_error   = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
117         },
118 };
119
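/*
 * Return the printable name of a RAID profile (e.g. "raid10"), or NULL if
 * @type is out of range.
 */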
120 const char *get_raid_name(enum btrfs_raid_types type)
121 {
122         if (type >= BTRFS_NR_RAID_TYPES)
123                 return NULL;
124
125         return btrfs_raid_array[type].raid_name;
126 }
127
128 static int init_first_rw_device(struct btrfs_trans_handle *trans,
129                                 struct btrfs_fs_info *fs_info);
130 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
131 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
132 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
133 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
134 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
135                              enum btrfs_map_op op,
136                              u64 logical, u64 *length,
137                              struct btrfs_bio **bbio_ret,
138                              int mirror_num, int need_raid_map);
139
140 /*
141  * Device locking
142  * ==============
143  *
144  * There are several mutexes that protect manipulation of devices and low-level
145  * structures like chunks but not block groups, extents or files
146  *
147  * uuid_mutex (global lock)
148  * ------------------------
149  * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
150  * the SCAN_DEV ioctl registration or from mount either implicitly (the first
151  * device) or requested by the device= mount option
152  *
153  * the mutex can be very coarse and can cover long-running operations
154  *
155  * protects: updates to fs_devices counters like missing devices, rw devices,
156  * seeding, structure cloning, opening/closing devices at mount/umount time
157  *
158  * global::fs_devs - add, remove, updates to the global list
159  *
160  * does not protect: manipulation of the fs_devices::devices list in general
161  * but in mount context it could be used to exclude list modifications by e.g.
162  * the scan ioctl
163  *
164  * btrfs_device::name - renames (write side), read is RCU
165  *
166  * fs_devices::device_list_mutex (per-fs, with RCU)
167  * ------------------------------------------------
168  * protects updates to fs_devices::devices, ie. adding and deleting
169  *
170  * simple list traversal with read-only actions can be done with RCU protection
171  *
172  * may be used to exclude some operations from running concurrently without any
173  * modifications to the list (see write_all_supers)
174  *
175  * Is not required at mount and close times, because our device list is
176  * protected by the uuid_mutex at that point.
177  *
178  * balance_mutex
179  * -------------
180  * protects balance structures (status, state) and context accessed from
181  * several places (internally, ioctl)
182  *
183  * chunk_mutex
184  * -----------
185  * protects chunks, adding or removing during allocation, trim or when a new
186  * device is added/removed
187  *
188  * cleaner_mutex
189  * -------------
190  * a big lock that is held by the cleaner thread and prevents running subvolume
191  * cleaning together with relocation or delayed iputs
192  *
193  *
194  * Lock nesting
195  * ============
196  *
197  * uuid_mutex
198  *   volume_mutex
199  *     device_list_mutex
200  *       chunk_mutex
201  *     balance_mutex
202  *
203  *
204  * Exclusive operations, BTRFS_FS_EXCL_OP
205  * ======================================
206  *
207  * Maintains the exclusivity of the following operations that apply to the
208  * whole filesystem and cannot run in parallel.
209  *
210  * - Balance (*)
211  * - Device add
212  * - Device remove
213  * - Device replace (*)
214  * - Resize
215  *
216  * The device operations (as above) can be in one of the following states:
217  *
218  * - Running state
219  * - Paused state
220  * - Completed state
221  *
222  * Only device operations marked with (*) can go into the Paused state for the
223  * following reasons:
224  *
225  * - ioctl (only Balance can be Paused through ioctl)
226  * - filesystem remounted as read-only
227  * - filesystem unmounted and mounted as read-only
228  * - system power-cycle and filesystem mounted as read-only
229  * - filesystem or device errors leading to forced read-only
230  *
231  * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
232  * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
233  * A device operation in Paused or Running state can be canceled or resumed
234  * either by ioctl (Balance only) or when remounted as read-write.
235  * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
236  * completed.
237  */
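
/*
 * Illustrative sketch only (not a real code path; names follow the structures
 * used in this file): following the nesting documented above, a caller that
 * needed several of these locks would take them outermost first and release
 * them in reverse order, e.g.:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	mutex_lock(&fs_info->chunk_mutex);
 *	... manipulate chunks and the device list ...
 *	mutex_unlock(&fs_info->chunk_mutex);
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */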
238
239 DEFINE_MUTEX(uuid_mutex);
240 static LIST_HEAD(fs_uuids);
241 struct list_head *btrfs_get_fs_uuids(void)
242 {
243         return &fs_uuids;
244 }
245
246 /*
247  * alloc_fs_devices - allocate struct btrfs_fs_devices
248  * @fsid:       if not NULL, copy the uuid to fs_devices::fsid
249  *
250  * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
251  * The returned struct is not linked onto any lists and can be destroyed with
252  * kfree() right away.
253  */
254 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
255 {
256         struct btrfs_fs_devices *fs_devs;
257
258         fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
259         if (!fs_devs)
260                 return ERR_PTR(-ENOMEM);
261
262         mutex_init(&fs_devs->device_list_mutex);
263
264         INIT_LIST_HEAD(&fs_devs->devices);
265         INIT_LIST_HEAD(&fs_devs->resized_devices);
266         INIT_LIST_HEAD(&fs_devs->alloc_list);
267         INIT_LIST_HEAD(&fs_devs->fs_list);
268         if (fsid)
269                 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
270
271         return fs_devs;
272 }
273
274 void btrfs_free_device(struct btrfs_device *device)
275 {
276         rcu_string_free(device->name);
277         bio_put(device->flush_bio);
278         kfree(device);
279 }
280
281 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
282 {
283         struct btrfs_device *device;
284         WARN_ON(fs_devices->opened);
285         while (!list_empty(&fs_devices->devices)) {
286                 device = list_entry(fs_devices->devices.next,
287                                     struct btrfs_device, dev_list);
288                 list_del(&device->dev_list);
289                 btrfs_free_device(device);
290         }
291         kfree(fs_devices);
292 }
293
294 static void btrfs_kobject_uevent(struct block_device *bdev,
295                                  enum kobject_action action)
296 {
297         int ret;
298
299         ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
300         if (ret)
301                 pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
302                         action,
303                         kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
304                         &disk_to_dev(bdev->bd_disk)->kobj);
305 }
306
307 void __exit btrfs_cleanup_fs_uuids(void)
308 {
309         struct btrfs_fs_devices *fs_devices;
310
311         while (!list_empty(&fs_uuids)) {
312                 fs_devices = list_entry(fs_uuids.next,
313                                         struct btrfs_fs_devices, fs_list);
314                 list_del(&fs_devices->fs_list);
315                 free_fs_devices(fs_devices);
316         }
317 }
318
319 /*
320  * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
321  * Returned struct is not linked onto any lists and must be destroyed using
322  * btrfs_free_device.
323  */
324 static struct btrfs_device *__alloc_device(void)
325 {
326         struct btrfs_device *dev;
327
328         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
329         if (!dev)
330                 return ERR_PTR(-ENOMEM);
331
332         /*
333          * Preallocate a bio that's always going to be used for flushing device
334          * barriers and matches the device lifespan
335          */
336         dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
337         if (!dev->flush_bio) {
338                 kfree(dev);
339                 return ERR_PTR(-ENOMEM);
340         }
341
342         INIT_LIST_HEAD(&dev->dev_list);
343         INIT_LIST_HEAD(&dev->dev_alloc_list);
344         INIT_LIST_HEAD(&dev->resized_list);
345
346         spin_lock_init(&dev->io_lock);
347
348         atomic_set(&dev->reada_in_flight, 0);
349         atomic_set(&dev->dev_stats_ccnt, 0);
350         btrfs_device_data_ordered_init(dev);
351         INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
352         INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
353
354         return dev;
355 }
356
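/* Find the fs_devices for @fsid on the global fs_uuids list, or NULL. */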
357 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
358 {
359         struct btrfs_fs_devices *fs_devices;
360
361         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
362                 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
363                         return fs_devices;
364         }
365         return NULL;
366 }
367
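/*
 * Open the block device at @device_path, set the btrfs block size on it and
 * read the primary super block into @bh.  On failure both *bdev and *bh are
 * set to NULL and a negative errno is returned.
 */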
368 static int
369 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
370                       int flush, struct block_device **bdev,
371                       struct buffer_head **bh)
372 {
373         int ret;
374
375         *bdev = blkdev_get_by_path(device_path, flags, holder);
376
377         if (IS_ERR(*bdev)) {
378                 ret = PTR_ERR(*bdev);
379                 goto error;
380         }
381
382         if (flush)
383                 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
384         ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
385         if (ret) {
386                 blkdev_put(*bdev, flags);
387                 goto error;
388         }
389         invalidate_bdev(*bdev);
390         *bh = btrfs_read_dev_super(*bdev);
391         if (IS_ERR(*bh)) {
392                 ret = PTR_ERR(*bh);
393                 blkdev_put(*bdev, flags);
394                 goto error;
395         }
396
397         return 0;
398
399 error:
400         *bdev = NULL;
401         *bh = NULL;
402         return ret;
403 }
404
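/*
 * Put the chain of bios between @head and @tail back at the front of the
 * pending list so they are submitted before anything queued later.
 */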
405 static void requeue_list(struct btrfs_pending_bios *pending_bios,
406                         struct bio *head, struct bio *tail)
407 {
408
409         struct bio *old_head;
410
411         old_head = pending_bios->head;
412         pending_bios->head = head;
413         if (pending_bios->tail)
414                 tail->bi_next = old_head;
415         else
416                 pending_bios->tail = tail;
417 }
418
419 /*
420  * we try to collect pending bios for a device so we don't get a large
421  * number of procs sending bios down to the same device.  This greatly
422  * improves the scheduler's ability to collect and merge the bios.
423  *
424  * But, it also turns into a long list of bios to process and that is sure
425  * to eventually make the worker thread block.  The solution here is to
426  * make some progress and then put this work struct back at the end of
427  * the list if the block device is congested.  This way, multiple devices
428  * can make progress from a single worker thread.
429  */
430 static noinline void run_scheduled_bios(struct btrfs_device *device)
431 {
432         struct btrfs_fs_info *fs_info = device->fs_info;
433         struct bio *pending;
434         struct backing_dev_info *bdi;
435         struct btrfs_pending_bios *pending_bios;
436         struct bio *tail;
437         struct bio *cur;
438         int again = 0;
439         unsigned long num_run;
440         unsigned long batch_run = 0;
441         unsigned long last_waited = 0;
442         int force_reg = 0;
443         int sync_pending = 0;
444         struct blk_plug plug;
445
446         /*
447          * this function runs all the bios we've collected for
448          * a particular device.  We don't want to wander off to
449          * another device without first sending all of these down.
450  * So, set up a plug here and finish it off before we return
451          */
452         blk_start_plug(&plug);
453
454         bdi = device->bdev->bd_bdi;
455
456 loop:
457         spin_lock(&device->io_lock);
458
459 loop_lock:
460         num_run = 0;
461
462         /* take all the bios off the list at once and process them
463          * later on (without the lock held).  But, remember the
464          * tail and other pointers so the bios can be properly reinserted
465          * into the list if we hit congestion
466          */
467         if (!force_reg && device->pending_sync_bios.head) {
468                 pending_bios = &device->pending_sync_bios;
469                 force_reg = 1;
470         } else {
471                 pending_bios = &device->pending_bios;
472                 force_reg = 0;
473         }
474
475         pending = pending_bios->head;
476         tail = pending_bios->tail;
477         WARN_ON(pending && !tail);
478
479         /*
480          * if pending was null this time around, no bios need processing
481          * at all and we can stop.  Otherwise it'll loop back up again
482          * and do an additional check so no bios are missed.
483          *
484          * device->running_pending is used to synchronize with the
485          * schedule_bio code.
486          */
487         if (device->pending_sync_bios.head == NULL &&
488             device->pending_bios.head == NULL) {
489                 again = 0;
490                 device->running_pending = 0;
491         } else {
492                 again = 1;
493                 device->running_pending = 1;
494         }
495
496         pending_bios->head = NULL;
497         pending_bios->tail = NULL;
498
499         spin_unlock(&device->io_lock);
500
501         while (pending) {
502
503                 rmb();
504                 /* we want to work on both lists, but do more bios on the
505                  * sync list than the regular list
506                  */
507                 if ((num_run > 32 &&
508                     pending_bios != &device->pending_sync_bios &&
509                     device->pending_sync_bios.head) ||
510                    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
511                     device->pending_bios.head)) {
512                         spin_lock(&device->io_lock);
513                         requeue_list(pending_bios, pending, tail);
514                         goto loop_lock;
515                 }
516
517                 cur = pending;
518                 pending = pending->bi_next;
519                 cur->bi_next = NULL;
520
521                 BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
522
523                 /*
524                  * if we're doing the sync list, record that our
525                  * plug has some sync requests on it
526                  *
527                  * If we're doing the regular list and there are
528                  * sync requests sitting around, unplug before
529                  * we add more
530                  */
531                 if (pending_bios == &device->pending_sync_bios) {
532                         sync_pending = 1;
533                 } else if (sync_pending) {
534                         blk_finish_plug(&plug);
535                         blk_start_plug(&plug);
536                         sync_pending = 0;
537                 }
538
539                 btrfsic_submit_bio(cur);
540                 num_run++;
541                 batch_run++;
542
543                 cond_resched();
544
545                 /*
546                  * we made progress, there is more work to do and the bdi
547                  * is now congested.  Back off and let other work structs
548                  * run instead
549                  */
550                 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
551                     fs_info->fs_devices->open_devices > 1) {
552                         struct io_context *ioc;
553
554                         ioc = current->io_context;
555
556                         /*
557                          * the main goal here is that we don't want to
558                          * block if we're going to be able to submit
559                          * more requests without blocking.
560                          *
561  * This code does two great things: it pokes into
562                          * the elevator code from a filesystem _and_
563                          * it makes assumptions about how batching works.
564                          */
565                         if (ioc && ioc->nr_batch_requests > 0 &&
566                             time_before(jiffies, ioc->last_waited + HZ/50UL) &&
567                             (last_waited == 0 ||
568                              ioc->last_waited == last_waited)) {
569                                 /*
570                                  * we want to go through our batch of
571                                  * requests and stop.  So, we copy out
572                                  * the ioc->last_waited time and test
573                                  * against it before looping
574                                  */
575                                 last_waited = ioc->last_waited;
576                                 cond_resched();
577                                 continue;
578                         }
579                         spin_lock(&device->io_lock);
580                         requeue_list(pending_bios, pending, tail);
581                         device->running_pending = 1;
582
583                         spin_unlock(&device->io_lock);
584                         btrfs_queue_work(fs_info->submit_workers,
585                                          &device->work);
586                         goto done;
587                 }
588         }
589
590         cond_resched();
591         if (again)
592                 goto loop;
593
594         spin_lock(&device->io_lock);
595         if (device->pending_bios.head || device->pending_sync_bios.head)
596                 goto loop_lock;
597         spin_unlock(&device->io_lock);
598
599 done:
600         blk_finish_plug(&plug);
601 }
602
603 static void pending_bios_fn(struct btrfs_work *work)
604 {
605         struct btrfs_device *device;
606
607         device = container_of(work, struct btrfs_device, work);
608         run_scheduled_bios(device);
609 }
610
611 /*
612  *  Search and remove all stale devices (devices which are not mounted).
613  *  When both inputs are NULL, it will search and release all stale devices.
614  *  path:        Optional. When provided, it will release all unmounted devices
615  *               matching this path only.
616  *  skip_device: Optional. Will skip this device when searching for the stale
617  *               devices.
618  */
619 static void btrfs_free_stale_devices(const char *path,
620                                      struct btrfs_device *skip_device)
621 {
622         struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
623         struct btrfs_device *device, *tmp_device;
624
625         list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
626                 mutex_lock(&fs_devices->device_list_mutex);
627                 if (fs_devices->opened) {
628                         mutex_unlock(&fs_devices->device_list_mutex);
629                         continue;
630                 }
631
632                 list_for_each_entry_safe(device, tmp_device,
633                                          &fs_devices->devices, dev_list) {
634                         int not_found = 0;
635
636                         if (skip_device && skip_device == device)
637                                 continue;
638                         if (path && !device->name)
639                                 continue;
640
641                         rcu_read_lock();
642                         if (path)
643                                 not_found = strcmp(rcu_str_deref(device->name),
644                                                    path);
645                         rcu_read_unlock();
646                         if (not_found)
647                                 continue;
648
649                         /* delete the stale device */
650                         fs_devices->num_devices--;
651                         list_del(&device->dev_list);
652                         btrfs_free_device(device);
653
654                         if (fs_devices->num_devices == 0)
655                                 break;
656                 }
657                 mutex_unlock(&fs_devices->device_list_mutex);
658                 if (fs_devices->num_devices == 0) {
659                         btrfs_sysfs_remove_fsid(fs_devices);
660                         list_del(&fs_devices->fs_list);
661                         free_fs_devices(fs_devices);
662                 }
663         }
664 }
665
666 /*
667  * This is only used on mount, and we are protected from competing things
668  * messing with our fs_devices by the uuid_mutex, thus we do not need the
669  * fs_devices->device_list_mutex here.
670  */
671 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
672                         struct btrfs_device *device, fmode_t flags,
673                         void *holder)
674 {
675         struct request_queue *q;
676         struct block_device *bdev;
677         struct buffer_head *bh;
678         struct btrfs_super_block *disk_super;
679         u64 devid;
680         int ret;
681
682         if (device->bdev)
683                 return -EINVAL;
684         if (!device->name)
685                 return -EINVAL;
686
687         ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
688                                     &bdev, &bh);
689         if (ret)
690                 return ret;
691
692         disk_super = (struct btrfs_super_block *)bh->b_data;
693         devid = btrfs_stack_device_id(&disk_super->dev_item);
694         if (devid != device->devid)
695                 goto error_brelse;
696
697         if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
698                 goto error_brelse;
699
700         device->generation = btrfs_super_generation(disk_super);
701
702         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
703                 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
704                 fs_devices->seeding = 1;
705         } else {
706                 if (bdev_read_only(bdev))
707                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
708                 else
709                         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
710         }
711
712         q = bdev_get_queue(bdev);
713         if (!blk_queue_nonrot(q))
714                 fs_devices->rotating = 1;
715
716         device->bdev = bdev;
717         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
718         device->mode = flags;
719
720         fs_devices->open_devices++;
721         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
722             device->devid != BTRFS_DEV_REPLACE_DEVID) {
723                 fs_devices->rw_devices++;
724                 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
725         }
726         brelse(bh);
727
728         return 0;
729
730 error_brelse:
731         brelse(bh);
732         blkdev_put(bdev, flags);
733
734         return -EINVAL;
735 }
736
737 /*
738  * Add new device to list of registered devices
739  *
740  * Returns:
741  * device pointer which was just added or updated when successful
742  * error pointer when failed
743  */
744 static noinline struct btrfs_device *device_list_add(const char *path,
745                            struct btrfs_super_block *disk_super,
746                            bool *new_device_added)
747 {
748         struct btrfs_device *device;
749         struct btrfs_fs_devices *fs_devices;
750         struct rcu_string *name;
751         u64 found_transid = btrfs_super_generation(disk_super);
752         u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
753
754         fs_devices = find_fsid(disk_super->fsid);
755         if (!fs_devices) {
756                 fs_devices = alloc_fs_devices(disk_super->fsid);
757                 if (IS_ERR(fs_devices))
758                         return ERR_CAST(fs_devices);
759
760                 mutex_lock(&fs_devices->device_list_mutex);
761                 list_add(&fs_devices->fs_list, &fs_uuids);
762
763                 device = NULL;
764         } else {
765                 mutex_lock(&fs_devices->device_list_mutex);
766                 device = btrfs_find_device(fs_devices, devid,
767                                 disk_super->dev_item.uuid, NULL, false);
768         }
769
770         if (!device) {
771                 if (fs_devices->opened) {
772                         mutex_unlock(&fs_devices->device_list_mutex);
773                         return ERR_PTR(-EBUSY);
774                 }
775
776                 device = btrfs_alloc_device(NULL, &devid,
777                                             disk_super->dev_item.uuid);
778                 if (IS_ERR(device)) {
779                         mutex_unlock(&fs_devices->device_list_mutex);
780                         /* we can safely leave the fs_devices entry around */
781                         return device;
782                 }
783
784                 name = rcu_string_strdup(path, GFP_NOFS);
785                 if (!name) {
786                         btrfs_free_device(device);
787                         mutex_unlock(&fs_devices->device_list_mutex);
788                         return ERR_PTR(-ENOMEM);
789                 }
790                 rcu_assign_pointer(device->name, name);
791
792                 list_add_rcu(&device->dev_list, &fs_devices->devices);
793                 fs_devices->num_devices++;
794
795                 device->fs_devices = fs_devices;
796                 *new_device_added = true;
797
798                 if (disk_super->label[0])
799                         pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
800                                 disk_super->label, devid, found_transid, path);
801                 else
802                         pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
803                                 disk_super->fsid, devid, found_transid, path);
804
805         } else if (!device->name || strcmp(device->name->str, path)) {
806                 /*
807                  * When FS is already mounted.
808                  * 1. If you are here and if the device->name is NULL that
809                  *    means this device was missing at time of FS mount.
810                  * 2. If you are here and if the device->name is different
811                  *    from 'path' that means either
812                  *      a. The same device disappeared and reappeared with
813                  *         different name. or
814                  *      b. The missing-disk-which-was-replaced, has
815                  *         reappeared now.
816                  *
817  * We must allow 1 and 2a above. But 2b would be spurious
818  * and unintentional.
819                  *
820                  * Further in case of 1 and 2a above, the disk at 'path'
821                  * would have missed some transaction when it was away and
822                  * in case of 2a the stale bdev has to be updated as well.
823  * 2b must not be allowed at any time.
824                  */
825
826                 /*
827                  * For now, we do allow update to btrfs_fs_device through the
828                  * btrfs dev scan cli after FS has been mounted.  We're still
829                  * tracking a problem where systems fail mount by subvolume id
830                  * when we reject replacement on a mounted FS.
831                  */
832                 if (!fs_devices->opened && found_transid < device->generation) {
833                         /*
834  * That is, if the FS is _not_ mounted and you are
835  * here, that means there is more than one disk with
836  * the same uuid and devid. We keep the one with the
837  * larger generation number, or the last one scanned
838  * if the generations are equal.
839                          */
840                         mutex_unlock(&fs_devices->device_list_mutex);
841                         return ERR_PTR(-EEXIST);
842                 }
843
844                 /*
845                  * We are going to replace the device path for a given devid,
846                  * make sure it's the same device if the device is mounted
847                  */
848                 if (device->bdev) {
849                         struct block_device *path_bdev;
850
851                         path_bdev = lookup_bdev(path);
852                         if (IS_ERR(path_bdev)) {
853                                 mutex_unlock(&fs_devices->device_list_mutex);
854                                 return ERR_CAST(path_bdev);
855                         }
856
857                         if (device->bdev != path_bdev) {
858                                 bdput(path_bdev);
859                                 mutex_unlock(&fs_devices->device_list_mutex);
860                                 /*
861                                  * device->fs_info may not be reliable here, so
862                                  * pass in a NULL instead. This avoids a
863                                  * possible use-after-free when the fs_info and
864                                  * fs_info->sb are already torn down.
865                                  */
866                                 btrfs_warn_in_rcu(NULL,
867         "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
868                                                   path, devid, found_transid,
869                                                   current->comm,
870                                                   task_pid_nr(current));
871                                 return ERR_PTR(-EEXIST);
872                         }
873                         bdput(path_bdev);
874                         btrfs_info_in_rcu(device->fs_info,
875         "devid %llu device path %s changed to %s scanned by %s (%d)",
876                                           devid, rcu_str_deref(device->name),
877                                           path, current->comm,
878                                           task_pid_nr(current));
879                 }
880
881                 name = rcu_string_strdup(path, GFP_NOFS);
882                 if (!name) {
883                         mutex_unlock(&fs_devices->device_list_mutex);
884                         return ERR_PTR(-ENOMEM);
885                 }
886                 rcu_string_free(device->name);
887                 rcu_assign_pointer(device->name, name);
888                 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
889                         fs_devices->missing_devices--;
890                         clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
891                 }
892         }
893
894         /*
895  * Unmount does not free the btrfs_device struct, but it zeroes the
896  * generation along with most of the other members. So just update
897  * it back here. We need it to pick the disk with the largest
898  * generation (as above).
899          */
900         if (!fs_devices->opened)
901                 device->generation = found_transid;
902
903         fs_devices->total_devices = btrfs_super_num_devices(disk_super);
904
905         mutex_unlock(&fs_devices->device_list_mutex);
906         return device;
907 }
908
909 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
910 {
911         struct btrfs_fs_devices *fs_devices;
912         struct btrfs_device *device;
913         struct btrfs_device *orig_dev;
914
915         fs_devices = alloc_fs_devices(orig->fsid);
916         if (IS_ERR(fs_devices))
917                 return fs_devices;
918
919         mutex_lock(&orig->device_list_mutex);
920         fs_devices->total_devices = orig->total_devices;
921
922         /* We hold the device_list_mutex, so it is safe to walk the devices. */
923         list_for_each_entry(orig_dev, &orig->devices, dev_list) {
924                 struct rcu_string *name;
925
926                 device = btrfs_alloc_device(NULL, &orig_dev->devid,
927                                             orig_dev->uuid);
928                 if (IS_ERR(device))
929                         goto error;
930
931                 /*
932                  * This is ok to do without rcu read locked because we hold the
933                  * uuid mutex so nothing we touch in here is going to disappear.
934                  */
935                 if (orig_dev->name) {
936                         name = rcu_string_strdup(orig_dev->name->str,
937                                         GFP_KERNEL);
938                         if (!name) {
939                                 btrfs_free_device(device);
940                                 goto error;
941                         }
942                         rcu_assign_pointer(device->name, name);
943                 }
944
945                 list_add(&device->dev_list, &fs_devices->devices);
946                 device->fs_devices = fs_devices;
947                 fs_devices->num_devices++;
948         }
949         mutex_unlock(&orig->device_list_mutex);
950         return fs_devices;
951 error:
952         mutex_unlock(&orig->device_list_mutex);
953         free_fs_devices(fs_devices);
954         return ERR_PTR(-ENOMEM);
955 }
956
957 /*
958  * After we have read the system tree and know devids belonging to
959  * this filesystem, remove any devices which do not belong to it.
960  */
961 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
962 {
963         struct btrfs_device *device, *next;
964         struct btrfs_device *latest_dev = NULL;
965
966         mutex_lock(&uuid_mutex);
967 again:
968         /* This is the initialized path, it is safe to release the devices. */
969         list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
970                 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
971                                                         &device->dev_state)) {
972                         if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
973                              &device->dev_state) &&
974                             !test_bit(BTRFS_DEV_STATE_MISSING,
975                                       &device->dev_state) &&
976                              (!latest_dev ||
977                               device->generation > latest_dev->generation)) {
978                                 latest_dev = device;
979                         }
980                         continue;
981                 }
982
983                 /*
984                  * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
985                  * in btrfs_init_dev_replace() so just continue.
986                  */
987                 if (device->devid == BTRFS_DEV_REPLACE_DEVID)
988                         continue;
989
990                 if (device->bdev) {
991                         blkdev_put(device->bdev, device->mode);
992                         device->bdev = NULL;
993                         fs_devices->open_devices--;
994                 }
995                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
996                         list_del_init(&device->dev_alloc_list);
997                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
998                         fs_devices->rw_devices--;
999                 }
1000                 list_del_init(&device->dev_list);
1001                 fs_devices->num_devices--;
1002                 btrfs_free_device(device);
1003         }
1004
1005         if (fs_devices->seed) {
1006                 fs_devices = fs_devices->seed;
1007                 goto again;
1008         }
1009
1010         fs_devices->latest_bdev = latest_dev->bdev;
1011
1012         mutex_unlock(&uuid_mutex);
1013 }
1014
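/* RCU callback that frees a btrfs_device once the grace period has passed. */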
1015 static void free_device_rcu(struct rcu_head *head)
1016 {
1017         struct btrfs_device *device;
1018
1019         device = container_of(head, struct btrfs_device, rcu);
1020         btrfs_free_device(device);
1021 }
1022
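/*
 * Release a device's block device; writeable devices are synced and their
 * page cache invalidated first.
 */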
1023 static void btrfs_close_bdev(struct btrfs_device *device)
1024 {
1025         if (!device->bdev)
1026                 return;
1027
1028         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1029                 sync_blockdev(device->bdev);
1030                 invalidate_bdev(device->bdev);
1031         }
1032
1033         blkdev_put(device->bdev, device->mode);
1034 }
1035
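/*
 * Drop the per-fs_devices counters for @device, close its block device and
 * replace it on the device list with a freshly allocated placeholder carrying
 * the same devid/uuid/name.  The old structure is freed after an RCU grace
 * period so that lockless readers of the list stay safe.
 */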
1036 static void btrfs_close_one_device(struct btrfs_device *device)
1037 {
1038         struct btrfs_fs_devices *fs_devices = device->fs_devices;
1039         struct btrfs_device *new_device;
1040         struct rcu_string *name;
1041
1042         if (device->bdev)
1043                 fs_devices->open_devices--;
1044
1045         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1046             device->devid != BTRFS_DEV_REPLACE_DEVID) {
1047                 list_del_init(&device->dev_alloc_list);
1048                 fs_devices->rw_devices--;
1049         }
1050
1051         if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1052                 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
1053
1054         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
1055                 clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
1056                 fs_devices->missing_devices--;
1057         }
1058
1059         btrfs_close_bdev(device);
1060
1061         new_device = btrfs_alloc_device(NULL, &device->devid,
1062                                         device->uuid);
1063         BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
1064
1065         /* Safe because we are under uuid_mutex */
1066         if (device->name) {
1067                 name = rcu_string_strdup(device->name->str, GFP_NOFS);
1068                 BUG_ON(!name); /* -ENOMEM */
1069                 rcu_assign_pointer(new_device->name, name);
1070         }
1071
1072         list_replace_rcu(&device->dev_list, &new_device->dev_list);
1073         new_device->fs_devices = device->fs_devices;
1074
1075         call_rcu(&device->rcu, free_device_rcu);
1076 }
1077
1078 static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
1079 {
1080         struct btrfs_device *device, *tmp;
1081
1082         if (--fs_devices->opened > 0)
1083                 return 0;
1084
1085         mutex_lock(&fs_devices->device_list_mutex);
1086         list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
1087                 btrfs_close_one_device(device);
1088         }
1089         mutex_unlock(&fs_devices->device_list_mutex);
1090
1091         WARN_ON(fs_devices->open_devices);
1092         WARN_ON(fs_devices->rw_devices);
1093         fs_devices->opened = 0;
1094         fs_devices->seeding = 0;
1095
1096         return 0;
1097 }
1098
1099 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1100 {
1101         struct btrfs_fs_devices *seed_devices = NULL;
1102         int ret;
1103
1104         mutex_lock(&uuid_mutex);
1105         ret = close_fs_devices(fs_devices);
1106         if (!fs_devices->opened) {
1107                 seed_devices = fs_devices->seed;
1108                 fs_devices->seed = NULL;
1109         }
1110         mutex_unlock(&uuid_mutex);
1111
1112         while (seed_devices) {
1113                 fs_devices = seed_devices;
1114                 seed_devices = fs_devices->seed;
1115                 close_fs_devices(fs_devices);
1116                 free_fs_devices(fs_devices);
1117         }
1118         return ret;
1119 }
1120
1121 static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1122                                 fmode_t flags, void *holder)
1123 {
1124         struct btrfs_device *device;
1125         struct btrfs_device *latest_dev = NULL;
1126         int ret = 0;
1127
1128         flags |= FMODE_EXCL;
1129
1130         list_for_each_entry(device, &fs_devices->devices, dev_list) {
1131                 /* Just open everything we can; ignore failures here */
1132                 if (btrfs_open_one_device(fs_devices, device, flags, holder))
1133                         continue;
1134
1135                 if (!latest_dev ||
1136                     device->generation > latest_dev->generation)
1137                         latest_dev = device;
1138         }
1139         if (fs_devices->open_devices == 0) {
1140                 ret = -EINVAL;
1141                 goto out;
1142         }
1143         fs_devices->opened = 1;
1144         fs_devices->latest_bdev = latest_dev->bdev;
1145         fs_devices->total_rw_bytes = 0;
1146 out:
1147         return ret;
1148 }
1149
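/* list_sort() comparator: order devices by ascending devid. */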
1150 static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
1151 {
1152         struct btrfs_device *dev1, *dev2;
1153
1154         dev1 = list_entry(a, struct btrfs_device, dev_list);
1155         dev2 = list_entry(b, struct btrfs_device, dev_list);
1156
1157         if (dev1->devid < dev2->devid)
1158                 return -1;
1159         else if (dev1->devid > dev2->devid)
1160                 return 1;
1161         return 0;
1162 }
1163
1164 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1165                        fmode_t flags, void *holder)
1166 {
1167         int ret;
1168
1169         lockdep_assert_held(&uuid_mutex);
1170         /*
1171          * The device_list_mutex cannot be taken here in case opening the
1172          * underlying device takes further locks like bd_mutex.
1173          *
1174          * We also don't need the lock here as this is called during mount and
1175          * exclusion is provided by uuid_mutex
1176          */
1177
1178         if (fs_devices->opened) {
1179                 fs_devices->opened++;
1180                 ret = 0;
1181         } else {
1182                 list_sort(NULL, &fs_devices->devices, devid_cmp);
1183                 ret = open_fs_devices(fs_devices, flags, holder);
1184         }
1185
1186         return ret;
1187 }
1188
1189 static void btrfs_release_disk_super(struct page *page)
1190 {
1191         kunmap(page);
1192         put_page(page);
1193 }
1194
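/*
 * Read the super block at @bytenr through the page cache and validate its
 * bytenr and magic.  Returns 0 and sets *page/*disk_super on success, 1 if
 * the super block is missing, unreadable or does not fit in one page.
 */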
1195 static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
1196                                  struct page **page,
1197                                  struct btrfs_super_block **disk_super)
1198 {
1199         void *p;
1200         pgoff_t index;
1201
1202         /* make sure our super fits in the device */
1203         if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1204                 return 1;
1205
1206         /* make sure our super fits in the page */
1207         if (sizeof(**disk_super) > PAGE_SIZE)
1208                 return 1;
1209
1210         /* make sure our super doesn't straddle pages on disk */
1211         index = bytenr >> PAGE_SHIFT;
1212         if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
1213                 return 1;
1214
1215         /* pull in the page with our super */
1216         *page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
1217                                    index, GFP_KERNEL);
1218
1219         if (IS_ERR_OR_NULL(*page))
1220                 return 1;
1221
1222         p = kmap(*page);
1223
1224         /* align our pointer to the offset of the super block */
1225         *disk_super = p + (bytenr & ~PAGE_MASK);
1226
1227         if (btrfs_super_bytenr(*disk_super) != bytenr ||
1228             btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
1229                 btrfs_release_disk_super(*page);
1230                 return 1;
1231         }
1232
1233         if ((*disk_super)->label[0] &&
1234                 (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
1235                 (*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';
1236
1237         return 0;
1238 }
1239
1240 /*
1241  * Look for a btrfs signature on a device. This may be called out of the mount path
1242  * and we are not allowed to call set_blocksize during the scan. The superblock
1243  * is read via the pagecache.
1244  */
1245 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1246                                            void *holder)
1247 {
1248         struct btrfs_super_block *disk_super;
1249         bool new_device_added = false;
1250         struct btrfs_device *device = NULL;
1251         struct block_device *bdev;
1252         struct page *page;
1253         u64 bytenr;
1254
1255         lockdep_assert_held(&uuid_mutex);
1256
1257         /*
1258          * we would like to check all the supers, but that would make
1259          * a btrfs mount succeed after a mkfs from a different FS.
1260          * So, we need to add a special mount option to scan for
1261          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1262          */
1263         bytenr = btrfs_sb_offset(0);
1264         flags |= FMODE_EXCL;
1265
1266         bdev = blkdev_get_by_path(path, flags, holder);
1267         if (IS_ERR(bdev))
1268                 return ERR_CAST(bdev);
1269
1270         if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
1271                 device = ERR_PTR(-EINVAL);
1272                 goto error_bdev_put;
1273         }
1274
1275         device = device_list_add(path, disk_super, &new_device_added);
1276         if (!IS_ERR(device)) {
1277                 if (new_device_added)
1278                         btrfs_free_stale_devices(path, device);
1279         }
1280
1281         btrfs_release_disk_super(page);
1282
1283 error_bdev_put:
1284         blkdev_put(bdev, flags);
1285
1286         return device;
1287 }
1288
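/*
 * Check whether [*start, *start + len) on @device overlaps any pending chunk
 * of @transaction or any pinned chunk of the filesystem.  If it does, bump
 * *start past the overlapping stripe and return 1 so the caller can retry
 * with the new offset; return 0 when the range is free of such chunks.
 */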
1289 static int contains_pending_extent(struct btrfs_transaction *transaction,
1290                                    struct btrfs_device *device,
1291                                    u64 *start, u64 len)
1292 {
1293         struct btrfs_fs_info *fs_info = device->fs_info;
1294         struct extent_map *em;
1295         struct list_head *search_list = &fs_info->pinned_chunks;
1296         int ret = 0;
1297         u64 physical_start = *start;
1298
1299         if (transaction)
1300                 search_list = &transaction->pending_chunks;
1301 again:
1302         list_for_each_entry(em, search_list, list) {
1303                 struct map_lookup *map;
1304                 int i;
1305
1306                 map = em->map_lookup;
1307                 for (i = 0; i < map->num_stripes; i++) {
1308                         u64 end;
1309
1310                         if (map->stripes[i].dev != device)
1311                                 continue;
1312                         if (map->stripes[i].physical >= physical_start + len ||
1313                             map->stripes[i].physical + em->orig_block_len <=
1314                             physical_start)
1315                                 continue;
1316                         /*
1317                          * Make sure that while processing the pinned list we do
1318                          * not override our *start with a lower value, because
1319                          * we can have pinned chunks that fall within this
1320                          * device hole and that have lower physical addresses
1321                          * than the pending chunks we processed before. If we
1322                          * do not take this special care we can end up getting
1323                          * 2 pending chunks that start at the same physical
1324                          * device offsets because the end offset of a pinned
1325                          * chunk can be equal to the start offset of some
1326                          * pending chunk.
1327                          */
1328                         end = map->stripes[i].physical + em->orig_block_len;
1329                         if (end > *start) {
1330                                 *start = end;
1331                                 ret = 1;
1332                         }
1333                 }
1334         }
1335         if (search_list != &fs_info->pinned_chunks) {
1336                 search_list = &fs_info->pinned_chunks;
1337                 goto again;
1338         }
1339
1340         return ret;
1341 }
1342
1343
1344 /*
1345  * find_free_dev_extent_start - find free space in the specified device
1346  * @device:       the device which we search the free space in
1347  * @num_bytes:    the size of the free space that we need
1348  * @search_start: the position from which to begin the search
1349  * @start:        store the start of the free space.
1350  * @len:          the size of the free space that we find, or the size
1351  *                of the max free space if we don't find suitable free space
1352  *
1353  * This uses a pretty simple search; the expectation is that it is
1354  * called very infrequently and that a given device has a small number
1355  * of extents
1356  *
1357  * @start is used to store the start of the free space if we find it. But if we
1358  * don't find suitable free space, it will be used to store the start position
1359  * of the max free space.
1360  *
1361  * @len is used to store the size of the free space that we find.
1362  * But if we don't find suitable free space, it is used to store the size of
1363  * the max free space.
1364  */
1365 int find_free_dev_extent_start(struct btrfs_transaction *transaction,
1366                                struct btrfs_device *device, u64 num_bytes,
1367                                u64 search_start, u64 *start, u64 *len)
1368 {
1369         struct btrfs_fs_info *fs_info = device->fs_info;
1370         struct btrfs_root *root = fs_info->dev_root;
1371         struct btrfs_key key;
1372         struct btrfs_dev_extent *dev_extent;
1373         struct btrfs_path *path;
1374         u64 hole_size;
1375         u64 max_hole_start;
1376         u64 max_hole_size;
1377         u64 extent_end;
1378         u64 search_end = device->total_bytes;
1379         int ret;
1380         int slot;
1381         struct extent_buffer *l;
1382
1383         /*
1384          * We don't want to overwrite the superblock on the drive nor any area
1385          * used by the boot loader (grub for example), so we make sure to start
1386          * at an offset of at least 1MB.
1387          */
1388         search_start = max_t(u64, search_start, SZ_1M);
1389
1390         path = btrfs_alloc_path();
1391         if (!path)
1392                 return -ENOMEM;
1393
1394         max_hole_start = search_start;
1395         max_hole_size = 0;
1396
1397 again:
1398         if (search_start >= search_end ||
1399                 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1400                 ret = -ENOSPC;
1401                 goto out;
1402         }
1403
1404         path->reada = READA_FORWARD;
1405         path->search_commit_root = 1;
1406         path->skip_locking = 1;
1407
1408         key.objectid = device->devid;
1409         key.offset = search_start;
1410         key.type = BTRFS_DEV_EXTENT_KEY;
1411
1412         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1413         if (ret < 0)
1414                 goto out;
1415         if (ret > 0) {
1416                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1417                 if (ret < 0)
1418                         goto out;
1419         }
1420
1421         while (1) {
1422                 l = path->nodes[0];
1423                 slot = path->slots[0];
1424                 if (slot >= btrfs_header_nritems(l)) {
1425                         ret = btrfs_next_leaf(root, path);
1426                         if (ret == 0)
1427                                 continue;
1428                         if (ret < 0)
1429                                 goto out;
1430
1431                         break;
1432                 }
1433                 btrfs_item_key_to_cpu(l, &key, slot);
1434
1435                 if (key.objectid < device->devid)
1436                         goto next;
1437
1438                 if (key.objectid > device->devid)
1439                         break;
1440
1441                 if (key.type != BTRFS_DEV_EXTENT_KEY)
1442                         goto next;
1443
1444                 if (key.offset > search_start) {
1445                         hole_size = key.offset - search_start;
1446
1447                         /*
1448                          * Have to check before we set max_hole_start, otherwise
1449                          * we could end up sending back this offset anyway.
1450                          */
1451                         if (contains_pending_extent(transaction, device,
1452                                                     &search_start,
1453                                                     hole_size)) {
1454                                 if (key.offset >= search_start) {
1455                                         hole_size = key.offset - search_start;
1456                                 } else {
1457                                         WARN_ON_ONCE(1);
1458                                         hole_size = 0;
1459                                 }
1460                         }
1461
1462                         if (hole_size > max_hole_size) {
1463                                 max_hole_start = search_start;
1464                                 max_hole_size = hole_size;
1465                         }
1466
1467                         /*
1468                          * If this free space is at least as large as what we
1469                          * need, it must be the max free space that we have
1470                          * found so far, so max_hole_start must point to the
1471                          * start of this free space and the length of this
1472                          * free space is stored in max_hole_size. Thus, we
1473                          * return max_hole_start and max_hole_size and go back
1474                          * to the caller.
1475                          */
1476                         if (hole_size >= num_bytes) {
1477                                 ret = 0;
1478                                 goto out;
1479                         }
1480                 }
1481
1482                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1483                 extent_end = key.offset + btrfs_dev_extent_length(l,
1484                                                                   dev_extent);
1485                 if (extent_end > search_start)
1486                         search_start = extent_end;
1487 next:
1488                 path->slots[0]++;
1489                 cond_resched();
1490         }
1491
1492         /*
1493          * At this point, search_start should be the end of
1494          * allocated dev extents, and when shrinking the device,
1495          * search_end may be smaller than search_start.
1496          */
1497         if (search_end > search_start) {
1498                 hole_size = search_end - search_start;
1499
1500                 if (contains_pending_extent(transaction, device, &search_start,
1501                                             hole_size)) {
1502                         btrfs_release_path(path);
1503                         goto again;
1504                 }
1505
1506                 if (hole_size > max_hole_size) {
1507                         max_hole_start = search_start;
1508                         max_hole_size = hole_size;
1509                 }
1510         }
1511
1512         /* See above. */
1513         if (max_hole_size < num_bytes)
1514                 ret = -ENOSPC;
1515         else
1516                 ret = 0;
1517
1518 out:
1519         btrfs_free_path(path);
1520         *start = max_hole_start;
1521         if (len)
1522                 *len = max_hole_size;
1523         return ret;
1524 }
1525
1526 int find_free_dev_extent(struct btrfs_trans_handle *trans,
1527                          struct btrfs_device *device, u64 num_bytes,
1528                          u64 *start, u64 *len)
1529 {
1530         /* FIXME use last free of some kind */
1531         return find_free_dev_extent_start(trans->transaction, device,
1532                                           num_bytes, 0, start, len);
1533 }
1534
1535 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1536                           struct btrfs_device *device,
1537                           u64 start, u64 *dev_extent_len)
1538 {
1539         struct btrfs_fs_info *fs_info = device->fs_info;
1540         struct btrfs_root *root = fs_info->dev_root;
1541         int ret;
1542         struct btrfs_path *path;
1543         struct btrfs_key key;
1544         struct btrfs_key found_key;
1545         struct extent_buffer *leaf = NULL;
1546         struct btrfs_dev_extent *extent = NULL;
1547
1548         path = btrfs_alloc_path();
1549         if (!path)
1550                 return -ENOMEM;
1551
1552         key.objectid = device->devid;
1553         key.offset = start;
1554         key.type = BTRFS_DEV_EXTENT_KEY;
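        /*
         * If there is no dev extent item starting exactly at @start, step
         * back to the previous dev extent item, which must cover @start,
         * and retry the search with that exact key.
         */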
1555 again:
1556         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1557         if (ret > 0) {
1558                 ret = btrfs_previous_item(root, path, key.objectid,
1559                                           BTRFS_DEV_EXTENT_KEY);
1560                 if (ret)
1561                         goto out;
1562                 leaf = path->nodes[0];
1563                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1564                 extent = btrfs_item_ptr(leaf, path->slots[0],
1565                                         struct btrfs_dev_extent);
1566                 BUG_ON(found_key.offset > start || found_key.offset +
1567                        btrfs_dev_extent_length(leaf, extent) < start);
1568                 key = found_key;
1569                 btrfs_release_path(path);
1570                 goto again;
1571         } else if (ret == 0) {
1572                 leaf = path->nodes[0];
1573                 extent = btrfs_item_ptr(leaf, path->slots[0],
1574                                         struct btrfs_dev_extent);
1575         } else {
1576                 btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
1577                 goto out;
1578         }
1579
1580         *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1581
1582         ret = btrfs_del_item(trans, root, path);
1583         if (ret) {
1584                 btrfs_handle_fs_error(fs_info, ret,
1585                                       "Failed to remove dev extent item");
1586         } else {
1587                 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1588         }
1589 out:
1590         btrfs_free_path(path);
1591         return ret;
1592 }
1593
1594 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1595                                   struct btrfs_device *device,
1596                                   u64 chunk_offset, u64 start, u64 num_bytes)
1597 {
1598         int ret;
1599         struct btrfs_path *path;
1600         struct btrfs_fs_info *fs_info = device->fs_info;
1601         struct btrfs_root *root = fs_info->dev_root;
1602         struct btrfs_dev_extent *extent;
1603         struct extent_buffer *leaf;
1604         struct btrfs_key key;
1605
1606         WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1607         WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1608         path = btrfs_alloc_path();
1609         if (!path)
1610                 return -ENOMEM;
1611
1612         key.objectid = device->devid;
1613         key.offset = start;
1614         key.type = BTRFS_DEV_EXTENT_KEY;
1615         ret = btrfs_insert_empty_item(trans, root, path, &key,
1616                                       sizeof(*extent));
1617         if (ret)
1618                 goto out;
1619
1620         leaf = path->nodes[0];
1621         extent = btrfs_item_ptr(leaf, path->slots[0],
1622                                 struct btrfs_dev_extent);
1623         btrfs_set_dev_extent_chunk_tree(leaf, extent,
1624                                         BTRFS_CHUNK_TREE_OBJECTID);
1625         btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1626                                             BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1627         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1628
1629         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1630         btrfs_mark_buffer_dirty(leaf);
1631 out:
1632         btrfs_free_path(path);
1633         return ret;
1634 }
1635
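/*
 * Return the first logical address past the last cached chunk mapping, i.e.
 * the logical offset at which the next chunk can be placed.
 */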
1636 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1637 {
1638         struct extent_map_tree *em_tree;
1639         struct extent_map *em;
1640         struct rb_node *n;
1641         u64 ret = 0;
1642
1643         em_tree = &fs_info->mapping_tree.map_tree;
1644         read_lock(&em_tree->lock);
1645         n = rb_last(&em_tree->map);
1646         if (n) {
1647                 em = rb_entry(n, struct extent_map, rb_node);
1648                 ret = em->start + em->len;
1649         }
1650         read_unlock(&em_tree->lock);
1651
1652         return ret;
1653 }
1654
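/*
 * Find the highest devid currently stored in the chunk tree and return the
 * next one to use in @devid_ret (1 if there are no device items yet).
 */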
1655 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1656                                     u64 *devid_ret)
1657 {
1658         int ret;
1659         struct btrfs_key key;
1660         struct btrfs_key found_key;
1661         struct btrfs_path *path;
1662
1663         path = btrfs_alloc_path();
1664         if (!path)
1665                 return -ENOMEM;
1666
1667         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1668         key.type = BTRFS_DEV_ITEM_KEY;
1669         key.offset = (u64)-1;
1670
1671         ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1672         if (ret < 0)
1673                 goto error;
1674
1675         BUG_ON(ret == 0); /* Corruption */
1676
1677         ret = btrfs_previous_item(fs_info->chunk_root, path,
1678                                   BTRFS_DEV_ITEMS_OBJECTID,
1679                                   BTRFS_DEV_ITEM_KEY);
1680         if (ret) {
1681                 *devid_ret = 1;
1682         } else {
1683                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1684                                       path->slots[0]);
1685                 *devid_ret = found_key.offset + 1;
1686         }
1687         ret = 0;
1688 error:
1689         btrfs_free_path(path);
1690         return ret;
1691 }
1692
1693 /*
1694  * The device information is stored in the chunk tree.
1695  * The btrfs_device struct should be fully filled in by the caller.
1696  */
1697 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1698                             struct btrfs_device *device)
1699 {
1700         int ret;
1701         struct btrfs_path *path;
1702         struct btrfs_dev_item *dev_item;
1703         struct extent_buffer *leaf;
1704         struct btrfs_key key;
1705         unsigned long ptr;
1706
1707         path = btrfs_alloc_path();
1708         if (!path)
1709                 return -ENOMEM;
1710
1711         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1712         key.type = BTRFS_DEV_ITEM_KEY;
1713         key.offset = device->devid;
1714
1715         ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1716                                       &key, sizeof(*dev_item));
1717         if (ret)
1718                 goto out;
1719
1720         leaf = path->nodes[0];
1721         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1722
1723         btrfs_set_device_id(leaf, dev_item, device->devid);
1724         btrfs_set_device_generation(leaf, dev_item, 0);
1725         btrfs_set_device_type(leaf, dev_item, device->type);
1726         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1727         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1728         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1729         btrfs_set_device_total_bytes(leaf, dev_item,
1730                                      btrfs_device_get_disk_total_bytes(device));
1731         btrfs_set_device_bytes_used(leaf, dev_item,
1732                                     btrfs_device_get_bytes_used(device));
1733         btrfs_set_device_group(leaf, dev_item, 0);
1734         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1735         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1736         btrfs_set_device_start_offset(leaf, dev_item, 0);
1737
1738         ptr = btrfs_device_uuid(dev_item);
1739         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1740         ptr = btrfs_device_fsid(dev_item);
1741         write_extent_buffer(leaf, trans->fs_info->fsid, ptr, BTRFS_FSID_SIZE);
1742         btrfs_mark_buffer_dirty(leaf);
1743
1744         ret = 0;
1745 out:
1746         btrfs_free_path(path);
1747         return ret;
1748 }
1749
1750 /*
1751  * Update ctime/mtime for a given device path.
1752  * Mainly used for ctime/mtime based probes like libblkid.
1753  */
1754 static void update_dev_time(const char *path_name)
1755 {
1756         struct file *filp;
1757
1758         filp = filp_open(path_name, O_RDWR, 0);
1759         if (IS_ERR(filp))
1760                 return;
1761         file_update_time(filp);
1762         filp_close(filp, NULL);
1763 }
1764
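/*
 * Remove the device item for @device from the chunk tree, in its own
 * transaction which is committed on success.
 */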
1765 static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
1766                              struct btrfs_device *device)
1767 {
1768         struct btrfs_root *root = fs_info->chunk_root;
1769         int ret;
1770         struct btrfs_path *path;
1771         struct btrfs_key key;
1772         struct btrfs_trans_handle *trans;
1773
1774         path = btrfs_alloc_path();
1775         if (!path)
1776                 return -ENOMEM;
1777
1778         trans = btrfs_start_transaction(root, 0);
1779         if (IS_ERR(trans)) {
1780                 btrfs_free_path(path);
1781                 return PTR_ERR(trans);
1782         }
1783         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1784         key.type = BTRFS_DEV_ITEM_KEY;
1785         key.offset = device->devid;
1786
1787         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1788         if (ret) {
1789                 if (ret > 0)
1790                         ret = -ENOENT;
1791                 btrfs_abort_transaction(trans, ret);
1792                 btrfs_end_transaction(trans);
1793                 goto out;
1794         }
1795
1796         ret = btrfs_del_item(trans, root, path);
1797         if (ret) {
1798                 btrfs_abort_transaction(trans, ret);
1799                 btrfs_end_transaction(trans);
1800         }
1801
1802 out:
1803         btrfs_free_path(path);
1804         if (!ret)
1805                 ret = btrfs_commit_transaction(trans);
1806         return ret;
1807 }
1808
1809 /*
1810  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1811  * filesystem. It's up to the caller to adjust that number, e.g. to account for
1812  * an ongoing device replace.
1813  */
1814 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1815                 u64 num_devices)
1816 {
1817         u64 all_avail;
1818         unsigned seq;
1819         int i;
1820
1821         do {
1822                 seq = read_seqbegin(&fs_info->profiles_lock);
1823
1824                 all_avail = fs_info->avail_data_alloc_bits |
1825                             fs_info->avail_system_alloc_bits |
1826                             fs_info->avail_metadata_alloc_bits;
1827         } while (read_seqretry(&fs_info->profiles_lock, seq));
1828
1829         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1830                 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1831                         continue;
1832
1833                 if (num_devices < btrfs_raid_array[i].devs_min) {
1834                         int ret = btrfs_raid_array[i].mindev_error;
1835
1836                         if (ret)
1837                                 return ret;
1838                 }
1839         }
1840
1841         return 0;
1842 }
1843
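/*
 * Return any device from @fs_devs other than @device that is not missing and
 * has an open block device, or NULL if there is none.
 */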
1844 static struct btrfs_device * btrfs_find_next_active_device(
1845                 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1846 {
1847         struct btrfs_device *next_device;
1848
1849         list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1850                 if (next_device != device &&
1851                     !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1852                     && next_device->bdev)
1853                         return next_device;
1854         }
1855
1856         return NULL;
1857 }
1858
1859 /*
1860  * Helper function to check if the given device is part of s_bdev / latest_bdev
1861  * and replace it with the provided or the next active device. In the context
1862  * where this function is called, there should always be another device (or
1863  * this_dev) which is active.
1864  */
1865 void btrfs_assign_next_active_device(struct btrfs_device *device,
1866                                      struct btrfs_device *this_dev)
1867 {
1868         struct btrfs_fs_info *fs_info = device->fs_info;
1869         struct btrfs_device *next_device;
1870
1871         if (this_dev)
1872                 next_device = this_dev;
1873         else
1874                 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1875                                                                 device);
1876         ASSERT(next_device);
1877
1878         if (fs_info->sb->s_bdev &&
1879                         (fs_info->sb->s_bdev == device->bdev))
1880                 fs_info->sb->s_bdev = next_device->bdev;
1881
1882         if (fs_info->fs_devices->latest_bdev == device->bdev)
1883                 fs_info->fs_devices->latest_bdev = next_device->bdev;
1884 }
1885
1886 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
1887                 u64 devid)
1888 {
1889         struct btrfs_device *device;
1890         struct btrfs_fs_devices *cur_devices;
1891         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
1892         u64 num_devices;
1893         int ret = 0;
1894
1895         mutex_lock(&uuid_mutex);
1896
1897         num_devices = fs_devices->num_devices;
1898         btrfs_dev_replace_read_lock(&fs_info->dev_replace);
1899         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
1900                 WARN_ON(num_devices < 1);
1901                 num_devices--;
1902         }
1903         btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
1904
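        /*
         * Removing this device drops the count by one, so check the RAID
         * constraints against num_devices - 1 (the replace target, if any,
         * was already excluded above).
         */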
1905         ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
1906         if (ret)
1907                 goto out;
1908
1909         ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
1910                                            &device);
1911         if (ret)
1912                 goto out;
1913
1914         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1915                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1916                 goto out;
1917         }
1918
1919         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1920             fs_info->fs_devices->rw_devices == 1) {
1921                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1922                 goto out;
1923         }
1924
1925         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1926                 mutex_lock(&fs_info->chunk_mutex);
1927                 list_del_init(&device->dev_alloc_list);
1928                 device->fs_devices->rw_devices--;
1929                 mutex_unlock(&fs_info->chunk_mutex);
1930         }
1931
1932         mutex_unlock(&uuid_mutex);
1933         ret = btrfs_shrink_device(device, 0);
1934         mutex_lock(&uuid_mutex);
1935         if (ret)
1936                 goto error_undo;
1937
1938         /*
1939          * TODO: the superblock still includes this device in its num_devices
1940          * counter although write_all_supers() is not locked out. This could
1941          * leave the filesystem in a state which requires a degraded mount.
1942          */
1943         ret = btrfs_rm_dev_item(fs_info, device);
1944         if (ret)
1945                 goto error_undo;
1946
1947         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
1948         btrfs_scrub_cancel_dev(fs_info, device);
1949
1950         /*
1951          * the device list mutex makes sure that we don't change
1952          * the device list while someone else is writing out all
1953          * the device supers. Whoever is writing all supers, should
1954          * lock the device list mutex before getting the number of
1955          * devices in the super block (super_copy). Conversely,
1956          * whoever updates the number of devices in the super block
1957          * (super_copy) should hold the device list mutex.
1958          */
1959
1960         /*
1961          * In the normal case cur_devices == fs_devices. But when deleting
1962          * a seed device, cur_devices points to the seed's own fs_devices,
1963          * which is linked in via fs_devices->seed.
1964          */
1965         cur_devices = device->fs_devices;
1966         mutex_lock(&fs_devices->device_list_mutex);
1967         list_del_rcu(&device->dev_list);
1968
1969         cur_devices->num_devices--;
1970         cur_devices->total_devices--;
1971         /* Update total_devices of the parent fs_devices if it's seed */
1972         if (cur_devices != fs_devices)
1973                 fs_devices->total_devices--;
1974
1975         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1976                 cur_devices->missing_devices--;
1977
1978         btrfs_assign_next_active_device(device, NULL);
1979
1980         if (device->bdev) {
1981                 cur_devices->open_devices--;
1982                 /* remove sysfs entry */
1983                 btrfs_sysfs_rm_device_link(fs_devices, device);
1984         }
1985
1986         num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
1987         btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
1988         mutex_unlock(&fs_devices->device_list_mutex);
1989
1990         /*
1991          * at this point, the device is zero sized and detached from
1992          * the devices list.  All that's left is to zero out the old
1993          * supers and free the device.
1994          */
1995         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
1996                 btrfs_scratch_superblocks(device->bdev, device->name->str);
1997
1998         btrfs_close_bdev(device);
1999         call_rcu(&device->rcu, free_device_rcu);
2000
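        /*
         * If that was the last open device of cur_devices (e.g. the last
         * device of a seed fs), unchain it from the ->seed list, then close
         * and free it.
         */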
2001         if (cur_devices->open_devices == 0) {
2002                 while (fs_devices) {
2003                         if (fs_devices->seed == cur_devices) {
2004                                 fs_devices->seed = cur_devices->seed;
2005                                 break;
2006                         }
2007                         fs_devices = fs_devices->seed;
2008                 }
2009                 cur_devices->seed = NULL;
2010                 close_fs_devices(cur_devices);
2011                 free_fs_devices(cur_devices);
2012         }
2013
2014 out:
2015         mutex_unlock(&uuid_mutex);
2016         return ret;
2017
2018 error_undo:
2019         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2020                 mutex_lock(&fs_info->chunk_mutex);
2021                 list_add(&device->dev_alloc_list,
2022                          &fs_devices->alloc_list);
2023                 device->fs_devices->rw_devices++;
2024                 mutex_unlock(&fs_info->chunk_mutex);
2025         }
2026         goto out;
2027 }
2028
2029 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2030 {
2031         struct btrfs_fs_devices *fs_devices;
2032
2033         lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2034
2035         /*
2036          * In case of a fs with no seed, srcdev->fs_devices points to the
2037          * fs_devices of fs_info. However when the device being replaced is
2038          * a seed device it points to the seed's local fs_devices. In short,
2039          * srcdev has its correct fs_devices in both cases.
2040          */
2041         fs_devices = srcdev->fs_devices;
2042
2043         list_del_rcu(&srcdev->dev_list);
2044         list_del(&srcdev->dev_alloc_list);
2045         fs_devices->num_devices--;
2046         if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2047                 fs_devices->missing_devices--;
2048
2049         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2050                 fs_devices->rw_devices--;
2051
2052         if (srcdev->bdev)
2053                 fs_devices->open_devices--;
2054 }
2055
2056 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
2057                                       struct btrfs_device *srcdev)
2058 {
2059         struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2060
2061         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
2062                 /* zero out the old super if it is writable */
2063                 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2064         }
2065
2066         btrfs_close_bdev(srcdev);
2067         call_rcu(&srcdev->rcu, free_device_rcu);
2068
2069         /* if there are no devices left we'd rather delete the fs_devices */
2070         if (!fs_devices->num_devices) {
2071                 struct btrfs_fs_devices *tmp_fs_devices;
2072
2073                 /*
2074                  * On a mounted FS, num_devices can't be zero unless it's a
2075                  * seed. When a seed device is being replaced, the replace
2076                  * target is added to the sprout FS, so there will be no
2077                  * devices left under the seed FS.
2078                  */
2079                 ASSERT(fs_devices->seeding);
2080
2081                 tmp_fs_devices = fs_info->fs_devices;
2082                 while (tmp_fs_devices) {
2083                         if (tmp_fs_devices->seed == fs_devices) {
2084                                 tmp_fs_devices->seed = fs_devices->seed;
2085                                 break;
2086                         }
2087                         tmp_fs_devices = tmp_fs_devices->seed;
2088                 }
2089                 fs_devices->seed = NULL;
2090                 close_fs_devices(fs_devices);
2091                 free_fs_devices(fs_devices);
2092         }
2093 }
2094
2095 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2096 {
2097         struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2098
2099         WARN_ON(!tgtdev);
2100         mutex_lock(&fs_devices->device_list_mutex);
2101
2102         btrfs_sysfs_rm_device_link(fs_devices, tgtdev);
2103
2104         if (tgtdev->bdev)
2105                 fs_devices->open_devices--;
2106
2107         fs_devices->num_devices--;
2108
2109         btrfs_assign_next_active_device(tgtdev, NULL);
2110
2111         list_del_rcu(&tgtdev->dev_list);
2112
2113         mutex_unlock(&fs_devices->device_list_mutex);
2114
2115         /*
2116          * The update_dev_time() within btrfs_scratch_superblocks()
2117          * may lead to a call to btrfs_show_devname() which will try
2118          * to hold device_list_mutex. At this point the device is
2119          * already off the device list, so we don't have to hold
2120          * the device_list_mutex lock.
2121          */
2122         btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2123
2124         btrfs_close_bdev(tgtdev);
2125         call_rcu(&tgtdev->rcu, free_device_rcu);
2126 }
2127
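/*
 * Read the superblock from @device_path and look up the matching
 * btrfs_device by devid, device uuid and fsid.
 */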
2128 static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
2129                                      const char *device_path,
2130                                      struct btrfs_device **device)
2131 {
2132         int ret = 0;
2133         struct btrfs_super_block *disk_super;
2134         u64 devid;
2135         u8 *dev_uuid;
2136         struct block_device *bdev;
2137         struct buffer_head *bh;
2138
2139         *device = NULL;
2140         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2141                                     fs_info->bdev_holder, 0, &bdev, &bh);
2142         if (ret)
2143                 return ret;
2144         disk_super = (struct btrfs_super_block *)bh->b_data;
2145         devid = btrfs_stack_device_id(&disk_super->dev_item);
2146         dev_uuid = disk_super->dev_item.uuid;
2147         *device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2148                                     disk_super->fsid, true);
2149         brelse(bh);
2150         if (!*device)
2151                 ret = -ENOENT;
2152         blkdev_put(bdev, FMODE_READ);
2153         return ret;
2154 }
2155
2156 int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
2157                                          const char *device_path,
2158                                          struct btrfs_device **device)
2159 {
2160         *device = NULL;
2161         if (strcmp(device_path, "missing") == 0) {
2162                 struct list_head *devices;
2163                 struct btrfs_device *tmp;
2164
2165                 devices = &fs_info->fs_devices->devices;
2166                 list_for_each_entry(tmp, devices, dev_list) {
2167                         if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2168                                         &tmp->dev_state) && !tmp->bdev) {
2169                                 *device = tmp;
2170                                 break;
2171                         }
2172                 }
2173
2174                 if (!*device)
2175                         return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2176
2177                 return 0;
2178         } else {
2179                 return btrfs_find_device_by_path(fs_info, device_path, device);
2180         }
2181 }
2182
2183 /*
2184  * Look up a device by device id, or by the given path if the id is 0.
2185  */
2186 int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
2187                                  const char *devpath,
2188                                  struct btrfs_device **device)
2189 {
2190         int ret;
2191
2192         if (devid) {
2193                 ret = 0;
2194                 *device = btrfs_find_device(fs_info->fs_devices, devid,
2195                                             NULL, NULL, true);
2196                 if (!*device)
2197                         ret = -ENOENT;
2198         } else {
2199                 if (!devpath || !devpath[0])
2200                         return -EINVAL;
2201
2202                 ret = btrfs_find_device_missing_or_by_path(fs_info, devpath,
2203                                                            device);
2204         }
2205         return ret;
2206 }
2207
2208 /*
2209  * Do all the dirty work required for changing the filesystem's UUID.
2210  */
2211 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2212 {
2213         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2214         struct btrfs_fs_devices *old_devices;
2215         struct btrfs_fs_devices *seed_devices;
2216         struct btrfs_super_block *disk_super = fs_info->super_copy;
2217         struct btrfs_device *device;
2218         u64 super_flags;
2219
2220         lockdep_assert_held(&uuid_mutex);
2221         if (!fs_devices->seeding)
2222                 return -EINVAL;
2223
2224         seed_devices = alloc_fs_devices(NULL);
2225         if (IS_ERR(seed_devices))
2226                 return PTR_ERR(seed_devices);
2227
2228         old_devices = clone_fs_devices(fs_devices);
2229         if (IS_ERR(old_devices)) {
2230                 kfree(seed_devices);
2231                 return PTR_ERR(old_devices);
2232         }
2233
2234         list_add(&old_devices->fs_list, &fs_uuids);
2235
2236         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2237         seed_devices->opened = 1;
2238         INIT_LIST_HEAD(&seed_devices->devices);
2239         INIT_LIST_HEAD(&seed_devices->alloc_list);
2240         mutex_init(&seed_devices->device_list_mutex);
2241
2242         mutex_lock(&fs_devices->device_list_mutex);
2243         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2244                               synchronize_rcu);
2245         list_for_each_entry(device, &seed_devices->devices, dev_list)
2246                 device->fs_devices = seed_devices;
2247
2248         mutex_lock(&fs_info->chunk_mutex);
2249         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
2250         mutex_unlock(&fs_info->chunk_mutex);
2251
2252         fs_devices->seeding = 0;
2253         fs_devices->num_devices = 0;
2254         fs_devices->open_devices = 0;
2255         fs_devices->missing_devices = 0;
2256         fs_devices->rotating = 0;
2257         fs_devices->seed = seed_devices;
2258
2259         generate_random_uuid(fs_devices->fsid);
2260         memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2261         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2262         mutex_unlock(&fs_devices->device_list_mutex);
2263
2264         super_flags = btrfs_super_flags(disk_super) &
2265                       ~BTRFS_SUPER_FLAG_SEEDING;
2266         btrfs_set_super_flags(disk_super, super_flags);
2267
2268         return 0;
2269 }
2270
2271 /*
2272  * Store the expected generation for seed devices in device items.
2273  */
2274 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
2275                                struct btrfs_fs_info *fs_info)
2276 {
2277         struct btrfs_root *root = fs_info->chunk_root;
2278         struct btrfs_path *path;
2279         struct extent_buffer *leaf;
2280         struct btrfs_dev_item *dev_item;
2281         struct btrfs_device *device;
2282         struct btrfs_key key;
2283         u8 fs_uuid[BTRFS_FSID_SIZE];
2284         u8 dev_uuid[BTRFS_UUID_SIZE];
2285         u64 devid;
2286         int ret;
2287
2288         path = btrfs_alloc_path();
2289         if (!path)
2290                 return -ENOMEM;
2291
2292         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2293         key.offset = 0;
2294         key.type = BTRFS_DEV_ITEM_KEY;
2295
2296         while (1) {
2297                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2298                 if (ret < 0)
2299                         goto error;
2300
2301                 leaf = path->nodes[0];
2302 next_slot:
2303                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2304                         ret = btrfs_next_leaf(root, path);
2305                         if (ret > 0)
2306                                 break;
2307                         if (ret < 0)
2308                                 goto error;
2309                         leaf = path->nodes[0];
2310                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2311                         btrfs_release_path(path);
2312                         continue;
2313                 }
2314
2315                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2316                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2317                     key.type != BTRFS_DEV_ITEM_KEY)
2318                         break;
2319
2320                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2321                                           struct btrfs_dev_item);
2322                 devid = btrfs_device_id(leaf, dev_item);
2323                 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2324                                    BTRFS_UUID_SIZE);
2325                 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2326                                    BTRFS_FSID_SIZE);
2327                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2328                                            fs_uuid, true);
2329                 BUG_ON(!device); /* Logic error */
2330
2331                 if (device->fs_devices->seeding) {
2332                         btrfs_set_device_generation(leaf, dev_item,
2333                                                     device->generation);
2334                         btrfs_mark_buffer_dirty(leaf);
2335                 }
2336
2337                 path->slots[0]++;
2338                 goto next_slot;
2339         }
2340         ret = 0;
2341 error:
2342         btrfs_free_path(path);
2343         return ret;
2344 }
2345
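/*
 * Add the block device at @device_path as a new device of the mounted
 * filesystem: open it exclusively, create the btrfs_device, link it into the
 * fs_devices lists, write its device item and, when adding to a seed
 * filesystem, sprout a new writable filesystem from the seed first.
 */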
2346 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2347 {
2348         struct btrfs_root *root = fs_info->dev_root;
2349         struct request_queue *q;
2350         struct btrfs_trans_handle *trans;
2351         struct btrfs_device *device;
2352         struct block_device *bdev;
2353         struct super_block *sb = fs_info->sb;
2354         struct rcu_string *name;
2355         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2356         u64 orig_super_total_bytes;
2357         u64 orig_super_num_devices;
2358         int seeding_dev = 0;
2359         int ret = 0;
2360         bool unlocked = false;
2361
2362         if (sb_rdonly(sb) && !fs_devices->seeding)
2363                 return -EROFS;
2364
2365         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2366                                   fs_info->bdev_holder);
2367         if (IS_ERR(bdev))
2368                 return PTR_ERR(bdev);
2369
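        /*
         * Adding a device to a seed filesystem sprouts a new writable
         * filesystem with a new fsid, so take s_umount and the uuid_mutex
         * for the whole operation.
         */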
2370         if (fs_devices->seeding) {
2371                 seeding_dev = 1;
2372                 down_write(&sb->s_umount);
2373                 mutex_lock(&uuid_mutex);
2374         }
2375
2376         filemap_write_and_wait(bdev->bd_inode->i_mapping);
2377
2378         mutex_lock(&fs_devices->device_list_mutex);
2379         list_for_each_entry(device, &fs_devices->devices, dev_list) {
2380                 if (device->bdev == bdev) {
2381                         ret = -EEXIST;
2382                         mutex_unlock(
2383                                 &fs_devices->device_list_mutex);
2384                         goto error;
2385                 }
2386         }
2387         mutex_unlock(&fs_devices->device_list_mutex);
2388
2389         device = btrfs_alloc_device(fs_info, NULL, NULL);
2390         if (IS_ERR(device)) {
2391                 /* we can safely leave the fs_devices entry around */
2392                 ret = PTR_ERR(device);
2393                 goto error;
2394         }
2395
2396         name = rcu_string_strdup(device_path, GFP_KERNEL);
2397         if (!name) {
2398                 ret = -ENOMEM;
2399                 goto error_free_device;
2400         }
2401         rcu_assign_pointer(device->name, name);
2402
2403         trans = btrfs_start_transaction(root, 0);
2404         if (IS_ERR(trans)) {
2405                 ret = PTR_ERR(trans);
2406                 goto error_free_device;
2407         }
2408
2409         q = bdev_get_queue(bdev);
2410         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2411         device->generation = trans->transid;
2412         device->io_width = fs_info->sectorsize;
2413         device->io_align = fs_info->sectorsize;
2414         device->sector_size = fs_info->sectorsize;
2415         device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2416                                          fs_info->sectorsize);
2417         device->disk_total_bytes = device->total_bytes;
2418         device->commit_total_bytes = device->total_bytes;
2419         device->fs_info = fs_info;
2420         device->bdev = bdev;
2421         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2422         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2423         device->mode = FMODE_EXCL;
2424         device->dev_stats_valid = 1;
2425         set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2426
2427         if (seeding_dev) {
2428                 sb->s_flags &= ~SB_RDONLY;
2429                 ret = btrfs_prepare_sprout(fs_info);
2430                 if (ret) {
2431                         btrfs_abort_transaction(trans, ret);
2432                         goto error_trans;
2433                 }
2434         }
2435
2436         device->fs_devices = fs_devices;
2437
2438         mutex_lock(&fs_devices->device_list_mutex);
2439         mutex_lock(&fs_info->chunk_mutex);
2440         list_add_rcu(&device->dev_list, &fs_devices->devices);
2441         list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2442         fs_devices->num_devices++;
2443         fs_devices->open_devices++;
2444         fs_devices->rw_devices++;
2445         fs_devices->total_devices++;
2446         fs_devices->total_rw_bytes += device->total_bytes;
2447
2448         atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2449
2450         if (!blk_queue_nonrot(q))
2451                 fs_devices->rotating = 1;
2452
2453         orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2454         btrfs_set_super_total_bytes(fs_info->super_copy,
2455                 round_down(orig_super_total_bytes + device->total_bytes,
2456                            fs_info->sectorsize));
2457
2458         orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2459         btrfs_set_super_num_devices(fs_info->super_copy,
2460                                     orig_super_num_devices + 1);
2461
2462         /*
2463          * we've got more storage, clear any full flags on the space
2464          * infos
2465          */
2466         btrfs_clear_space_info_full(fs_info);
2467
2468         mutex_unlock(&fs_info->chunk_mutex);
2469
2470         /* Add sysfs device entry */
2471         btrfs_sysfs_add_device_link(fs_devices, device);
2472
2473         mutex_unlock(&fs_devices->device_list_mutex);
2474
2475         if (seeding_dev) {
2476                 mutex_lock(&fs_info->chunk_mutex);
2477                 ret = init_first_rw_device(trans, fs_info);
2478                 mutex_unlock(&fs_info->chunk_mutex);
2479                 if (ret) {
2480                         btrfs_abort_transaction(trans, ret);
2481                         goto error_sysfs;
2482                 }
2483         }
2484
2485         ret = btrfs_add_dev_item(trans, device);
2486         if (ret) {
2487                 btrfs_abort_transaction(trans, ret);
2488                 goto error_sysfs;
2489         }
2490
2491         if (seeding_dev) {
2492                 char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
2493
2494                 ret = btrfs_finish_sprout(trans, fs_info);
2495                 if (ret) {
2496                         btrfs_abort_transaction(trans, ret);
2497                         goto error_sysfs;
2498                 }
2499
2500                 /* Sprouting would change the fsid of the mounted root,
2501                  * so rename the fsid in sysfs
2502                  */
2503                 snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
2504                                                 fs_info->fsid);
2505                 if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
2506                         btrfs_warn(fs_info,
2507                                    "sysfs: failed to create fsid for sprout");
2508         }
2509
2510         ret = btrfs_commit_transaction(trans);
2511
2512         if (seeding_dev) {
2513                 mutex_unlock(&uuid_mutex);
2514                 up_write(&sb->s_umount);
2515                 unlocked = true;
2516
2517                 if (ret) /* transaction commit */
2518                         return ret;
2519
2520                 ret = btrfs_relocate_sys_chunks(fs_info);
2521                 if (ret < 0)
2522                         btrfs_handle_fs_error(fs_info, ret,
2523                                     "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2524                 trans = btrfs_attach_transaction(root);
2525                 if (IS_ERR(trans)) {
2526                         if (PTR_ERR(trans) == -ENOENT)
2527                                 return 0;
2528                         ret = PTR_ERR(trans);
2529                         trans = NULL;
2530                         goto error_sysfs;
2531                 }
2532                 ret = btrfs_commit_transaction(trans);
2533         }
2534
2535         /* Update ctime/mtime for libblkid */
2536         update_dev_time(device_path);
2537         return ret;
2538
2539 error_sysfs:
2540         btrfs_sysfs_rm_device_link(fs_devices, device);
2541         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2542         mutex_lock(&fs_info->chunk_mutex);
2543         list_del_rcu(&device->dev_list);
2544         list_del(&device->dev_alloc_list);
2545         fs_info->fs_devices->num_devices--;
2546         fs_info->fs_devices->open_devices--;
2547         fs_info->fs_devices->rw_devices--;
2548         fs_info->fs_devices->total_devices--;
2549         fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2550         atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2551         btrfs_set_super_total_bytes(fs_info->super_copy,
2552                                     orig_super_total_bytes);
2553         btrfs_set_super_num_devices(fs_info->super_copy,
2554                                     orig_super_num_devices);
2555         mutex_unlock(&fs_info->chunk_mutex);
2556         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2557 error_trans:
2558         if (seeding_dev)
2559                 sb->s_flags |= SB_RDONLY;
2560         if (trans)
2561                 btrfs_end_transaction(trans);
2562 error_free_device:
2563         btrfs_free_device(device);
2564 error:
2565         blkdev_put(bdev, FMODE_EXCL);
2566         if (seeding_dev && !unlocked) {
2567                 mutex_unlock(&uuid_mutex);
2568                 up_write(&sb->s_umount);
2569         }
2570         return ret;
2571 }
2572
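/*
 * Write the current in-memory state of @device (type, io parameters, total
 * and used bytes) back into its device item in the chunk tree.
 */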
2573 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2574                                         struct btrfs_device *device)
2575 {
2576         int ret;
2577         struct btrfs_path *path;
2578         struct btrfs_root *root = device->fs_info->chunk_root;
2579         struct btrfs_dev_item *dev_item;
2580         struct extent_buffer *leaf;
2581         struct btrfs_key key;
2582
2583         path = btrfs_alloc_path();
2584         if (!path)
2585                 return -ENOMEM;
2586
2587         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2588         key.type = BTRFS_DEV_ITEM_KEY;
2589         key.offset = device->devid;
2590
2591         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2592         if (ret < 0)
2593                 goto out;
2594
2595         if (ret > 0) {
2596                 ret = -ENOENT;
2597                 goto out;
2598         }
2599
2600         leaf = path->nodes[0];
2601         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2602
2603         btrfs_set_device_id(leaf, dev_item, device->devid);
2604         btrfs_set_device_type(leaf, dev_item, device->type);
2605         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2606         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2607         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2608         btrfs_set_device_total_bytes(leaf, dev_item,
2609                                      btrfs_device_get_disk_total_bytes(device));
2610         btrfs_set_device_bytes_used(leaf, dev_item,
2611                                     btrfs_device_get_bytes_used(device));
2612         btrfs_mark_buffer_dirty(leaf);
2613
2614 out:
2615         btrfs_free_path(path);
2616         return ret;
2617 }
2618
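/*
 * Grow @device to @new_size (rounded down to a sector boundary): update the
 * in-memory sizes and the superblock total, queue the device on the resized
 * list and write the updated device item.
 */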
2619 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2620                       struct btrfs_device *device, u64 new_size)
2621 {
2622         struct btrfs_fs_info *fs_info = device->fs_info;
2623         struct btrfs_super_block *super_copy = fs_info->super_copy;
2624         struct btrfs_fs_devices *fs_devices;
2625         u64 old_total;
2626         u64 diff;
2627
2628         if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2629                 return -EACCES;
2630
2631         new_size = round_down(new_size, fs_info->sectorsize);
2632
2633         mutex_lock(&fs_info->chunk_mutex);
2634         old_total = btrfs_super_total_bytes(super_copy);
2635         diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2636
2637         if (new_size <= device->total_bytes ||
2638             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2639                 mutex_unlock(&fs_info->chunk_mutex);
2640                 return -EINVAL;
2641         }
2642
2643         fs_devices = fs_info->fs_devices;
2644
2645         btrfs_set_super_total_bytes(super_copy,
2646                         round_down(old_total + diff, fs_info->sectorsize));
2647         device->fs_devices->total_rw_bytes += diff;
2648
2649         btrfs_device_set_total_bytes(device, new_size);
2650         btrfs_device_set_disk_total_bytes(device, new_size);
2651         btrfs_clear_space_info_full(device->fs_info);
2652         if (list_empty(&device->resized_list))
2653                 list_add_tail(&device->resized_list,
2654                               &fs_devices->resized_devices);
2655         mutex_unlock(&fs_info->chunk_mutex);
2656
2657         return btrfs_update_device(trans, device);
2658 }
2659
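/*
 * Delete the chunk item for @chunk_offset from the chunk tree.
 */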
2660 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2661 {
2662         struct btrfs_fs_info *fs_info = trans->fs_info;
2663         struct btrfs_root *root = fs_info->chunk_root;
2664         int ret;
2665         struct btrfs_path *path;
2666         struct btrfs_key key;
2667
2668         path = btrfs_alloc_path();
2669         if (!path)
2670                 return -ENOMEM;
2671
2672         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2673         key.offset = chunk_offset;
2674         key.type = BTRFS_CHUNK_ITEM_KEY;
2675
2676         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2677         if (ret < 0)
2678                 goto out;
2679         else if (ret > 0) { /* Logic error or corruption */
2680                 btrfs_handle_fs_error(fs_info, -ENOENT,
2681                                       "Failed lookup while freeing chunk.");
2682                 ret = -ENOENT;
2683                 goto out;
2684         }
2685
2686         ret = btrfs_del_item(trans, root, path);
2687         if (ret < 0)
2688                 btrfs_handle_fs_error(fs_info, ret,
2689                                       "Failed to delete chunk item.");
2690 out:
2691         btrfs_free_path(path);
2692         return ret;
2693 }
2694
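/*
 * Remove the entry for the chunk at @chunk_offset from the superblock's
 * sys_chunk_array, which mirrors system chunks so that the chunk tree itself
 * can be read at mount time.
 */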
2695 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2696 {
2697         struct btrfs_super_block *super_copy = fs_info->super_copy;
2698         struct btrfs_disk_key *disk_key;
2699         struct btrfs_chunk *chunk;
2700         u8 *ptr;
2701         int ret = 0;
2702         u32 num_stripes;
2703         u32 array_size;
2704         u32 len = 0;
2705         u32 cur;
2706         struct btrfs_key key;
2707
2708         mutex_lock(&fs_info->chunk_mutex);
2709         array_size = btrfs_super_sys_array_size(super_copy);
2710
2711         ptr = super_copy->sys_chunk_array;
2712         cur = 0;
2713
2714         while (cur < array_size) {
2715                 disk_key = (struct btrfs_disk_key *)ptr;
2716                 btrfs_disk_key_to_cpu(&key, disk_key);
2717
2718                 len = sizeof(*disk_key);
2719
2720                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2721                         chunk = (struct btrfs_chunk *)(ptr + len);
2722                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2723                         len += btrfs_chunk_item_size(num_stripes);
2724                 } else {
2725                         ret = -EIO;
2726                         break;
2727                 }
2728                 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2729                     key.offset == chunk_offset) {
2730                         memmove(ptr, ptr + len, array_size - (cur + len));
2731                         array_size -= len;
2732                         btrfs_set_super_sys_array_size(super_copy, array_size);
2733                 } else {
2734                         ptr += len;
2735                         cur += len;
2736                 }
2737         }
2738         mutex_unlock(&fs_info->chunk_mutex);
2739         return ret;
2740 }
2741
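/*
 * Look up the extent map that describes the chunk at @logical and make sure
 * it actually covers @logical.
 */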
2742 static struct extent_map *get_chunk_map(struct btrfs_fs_info *fs_info,
2743                                         u64 logical, u64 length)
2744 {
2745         struct extent_map_tree *em_tree;
2746         struct extent_map *em;
2747
2748         em_tree = &fs_info->mapping_tree.map_tree;
2749         read_lock(&em_tree->lock);
2750         em = lookup_extent_mapping(em_tree, logical, length);
2751         read_unlock(&em_tree->lock);
2752
2753         if (!em) {
2754                 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2755                            logical, length);
2756                 return ERR_PTR(-EINVAL);
2757         }
2758
2759         if (em->start > logical || em->start + em->len < logical) {
2760                 btrfs_crit(fs_info,
2761                            "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2762                            logical, length, em->start, em->start + em->len);
2763                 free_extent_map(em);
2764                 return ERR_PTR(-EINVAL);
2765         }
2766
2767         /* callers are responsible for dropping em's ref. */
2768         return em;
2769 }
2770
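/*
 * Remove the chunk at @chunk_offset: free the dev extent of every stripe,
 * update the involved devices, delete the chunk item (and its sys_chunk_array
 * entry for system chunks) and finally remove the block group.
 */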
2771 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2772 {
2773         struct btrfs_fs_info *fs_info = trans->fs_info;
2774         struct extent_map *em;
2775         struct map_lookup *map;
2776         u64 dev_extent_len = 0;
2777         int i, ret = 0;
2778         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2779
2780         em = get_chunk_map(fs_info, chunk_offset, 1);
2781         if (IS_ERR(em)) {
2782                 /*
2783                  * This is a logic error, but we don't want to just rely on the
2784                  * user having built with ASSERT enabled, so if ASSERT doesn't
2785                  * do anything we still error out.
2786                  */
2787                 ASSERT(0);
2788                 return PTR_ERR(em);
2789         }
2790         map = em->map_lookup;
2791         mutex_lock(&fs_info->chunk_mutex);
2792         check_system_chunk(trans, map->type);
2793         mutex_unlock(&fs_info->chunk_mutex);
2794
2795         /*
2796          * Take the device list mutex to prevent races with the final phase of
2797          * a device replace operation that replaces the device object associated
2798          * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2799          */
2800         mutex_lock(&fs_devices->device_list_mutex);
2801         for (i = 0; i < map->num_stripes; i++) {
2802                 struct btrfs_device *device = map->stripes[i].dev;
2803                 ret = btrfs_free_dev_extent(trans, device,
2804                                             map->stripes[i].physical,
2805                                             &dev_extent_len);
2806                 if (ret) {
2807                         mutex_unlock(&fs_devices->device_list_mutex);
2808                         btrfs_abort_transaction(trans, ret);
2809                         goto out;
2810                 }
2811
2812                 if (device->bytes_used > 0) {
2813                         mutex_lock(&fs_info->chunk_mutex);
2814                         btrfs_device_set_bytes_used(device,
2815                                         device->bytes_used - dev_extent_len);
2816                         atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
2817                         btrfs_clear_space_info_full(fs_info);
2818                         mutex_unlock(&fs_info->chunk_mutex);
2819                 }
2820
2821                 if (map->stripes[i].dev) {
2822                         ret = btrfs_update_device(trans, map->stripes[i].dev);
2823                         if (ret) {
2824                                 mutex_unlock(&fs_devices->device_list_mutex);
2825                                 btrfs_abort_transaction(trans, ret);
2826                                 goto out;
2827                         }
2828                 }
2829         }
2830         mutex_unlock(&fs_devices->device_list_mutex);
2831
2832         ret = btrfs_free_chunk(trans, chunk_offset);
2833         if (ret) {
2834                 btrfs_abort_transaction(trans, ret);
2835                 goto out;
2836         }
2837
2838         trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
2839
2840         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2841                 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
2842                 if (ret) {
2843                         btrfs_abort_transaction(trans, ret);
2844                         goto out;
2845                 }
2846         }
2847
2848         ret = btrfs_remove_block_group(trans, chunk_offset, em);
2849         if (ret) {
2850                 btrfs_abort_transaction(trans, ret);
2851                 goto out;
2852         }
2853
2854 out:
2855         /* once for us */
2856         free_extent_map(em);
2857         return ret;
2858 }
2859
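/*
 * Relocate all extents living in the chunk at @chunk_offset and then delete
 * the now empty chunk.  The caller must hold delete_unused_bgs_mutex, see
 * the comment below.
 */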
2860 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2861 {
2862         struct btrfs_root *root = fs_info->chunk_root;
2863         struct btrfs_trans_handle *trans;
2864         int ret;
2865
2866         /*
2867          * Prevent races with automatic removal of unused block groups.
2868          * After we relocate and before we remove the chunk with offset
2869          * chunk_offset, automatic removal of the block group can kick in,
2870          * resulting in a failure when calling btrfs_remove_chunk() below.
2871          *
2872          * Make sure to acquire this mutex before doing a tree search (dev
2873          * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2874          * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2875          * we release the path used to search the chunk/dev tree and before
2876          * the current task acquires this mutex and calls us.
2877          */
2878         lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
2879
2880         ret = btrfs_can_relocate(fs_info, chunk_offset);
2881         if (ret)
2882                 return -ENOSPC;
2883
2884         /* step one, relocate all the extents inside this chunk */
2885         btrfs_scrub_pause(fs_info);
2886         ret = btrfs_relocate_block_group(fs_info, chunk_offset);
2887         btrfs_scrub_continue(fs_info);
2888         if (ret)
2889                 return ret;
2890
2891         /*
2892          * We add the kobjects here (and after forcing data chunk creation)
2893          * since relocation is the only place we'll create chunks of a new
2894          * type at runtime.  The only place where we'll remove the last
2895          * chunk of a type is the call immediately below this one.  Even
2896          * so, we're protected against races with the cleaner thread since
2897          * we're covered by the delete_unused_bgs_mutex.
2898          */
2899         btrfs_add_raid_kobjects(fs_info);
2900
2901         trans = btrfs_start_trans_remove_block_group(root->fs_info,
2902                                                      chunk_offset);
2903         if (IS_ERR(trans)) {
2904                 ret = PTR_ERR(trans);
2905                 btrfs_handle_fs_error(root->fs_info, ret, NULL);
2906                 return ret;
2907         }
2908
2909         /*
2910          * step two, delete the device extents and the
2911          * chunk tree entries
2912          */
2913         ret = btrfs_remove_chunk(trans, chunk_offset);
2914         btrfs_end_transaction(trans);
2915         return ret;
2916 }
2917
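/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk.  Chunks
 * failing with -ENOSPC are retried once after a full pass; if any still
 * fail after the retry, -ENOSPC is returned.
 */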
2918 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
2919 {
2920         struct btrfs_root *chunk_root = fs_info->chunk_root;
2921         struct btrfs_path *path;
2922         struct extent_buffer *leaf;
2923         struct btrfs_chunk *chunk;
2924         struct btrfs_key key;
2925         struct btrfs_key found_key;
2926         u64 chunk_type;
2927         bool retried = false;
2928         int failed = 0;
2929         int ret;
2930
2931         path = btrfs_alloc_path();
2932         if (!path)
2933                 return -ENOMEM;
2934
2935 again:
2936         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2937         key.offset = (u64)-1;
2938         key.type = BTRFS_CHUNK_ITEM_KEY;
2939
2940         while (1) {
2941                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
2942                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2943                 if (ret < 0) {
2944                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2945                         goto error;
2946                 }
2947                 BUG_ON(ret == 0); /* Corruption */
2948
2949                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2950                                           key.type);
2951                 if (ret)
2952                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2953                 if (ret < 0)
2954                         goto error;
2955                 if (ret > 0)
2956                         break;
2957
2958                 leaf = path->nodes[0];
2959                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2960
2961                 chunk = btrfs_item_ptr(leaf, path->slots[0],
2962                                        struct btrfs_chunk);
2963                 chunk_type = btrfs_chunk_type(leaf, chunk);
2964                 btrfs_release_path(path);
2965
2966                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2967                         ret = btrfs_relocate_chunk(fs_info, found_key.offset);
2968                         if (ret == -ENOSPC)
2969                                 failed++;
2970                         else
2971                                 BUG_ON(ret);
2972                 }
2973                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
2974
2975                 if (found_key.offset == 0)
2976                         break;
2977                 key.offset = found_key.offset - 1;
2978         }
2979         ret = 0;
2980         if (failed && !retried) {
2981                 failed = 0;
2982                 retried = true;
2983                 goto again;
2984         } else if (WARN_ON(failed && retried)) {
2985                 ret = -ENOSPC;
2986         }
2987 error:
2988         btrfs_free_path(path);
2989         return ret;
2990 }
2991
2992 /*
2993  * return 1 : allocated a data chunk successfully,
2994  * return <0: error while allocating a data chunk,
2995  * return 0 : no need to allocate a data chunk.
2996  */
2997 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
2998                                       u64 chunk_offset)
2999 {
3000         struct btrfs_block_group_cache *cache;
3001         u64 bytes_used;
3002         u64 chunk_type;
3003
3004         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3005         ASSERT(cache);
3006         chunk_type = cache->flags;
3007         btrfs_put_block_group(cache);
3008
3009         if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
3010                 spin_lock(&fs_info->data_sinfo->lock);
3011                 bytes_used = fs_info->data_sinfo->bytes_used;
3012                 spin_unlock(&fs_info->data_sinfo->lock);
3013
3014                 if (!bytes_used) {
3015                         struct btrfs_trans_handle *trans;
3016                         int ret;
3017
3018                         trans = btrfs_join_transaction(fs_info->tree_root);
3019                         if (IS_ERR(trans))
3020                                 return PTR_ERR(trans);
3021
3022                         ret = btrfs_force_chunk_alloc(trans,
3023                                                       BTRFS_BLOCK_GROUP_DATA);
3024                         btrfs_end_transaction(trans);
3025                         if (ret < 0)
3026                                 return ret;
3027
3028                         btrfs_add_raid_kobjects(fs_info);
3029
3030                         return 1;
3031                 }
3032         }
3033         return 0;
3034 }
3035
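/*
 * Persist the balance control as a balance item in the tree root
 * (BTRFS_BALANCE_OBJECTID/BTRFS_TEMPORARY_ITEM_KEY) so that an interrupted
 * balance can be resumed on the next mount.
 */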
3036 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3037                                struct btrfs_balance_control *bctl)
3038 {
3039         struct btrfs_root *root = fs_info->tree_root;
3040         struct btrfs_trans_handle *trans;
3041         struct btrfs_balance_item *item;
3042         struct btrfs_disk_balance_args disk_bargs;
3043         struct btrfs_path *path;
3044         struct extent_buffer *leaf;
3045         struct btrfs_key key;
3046         int ret, err;
3047
3048         path = btrfs_alloc_path();
3049         if (!path)
3050                 return -ENOMEM;
3051
3052         trans = btrfs_start_transaction(root, 0);
3053         if (IS_ERR(trans)) {
3054                 btrfs_free_path(path);
3055                 return PTR_ERR(trans);
3056         }
3057
3058         key.objectid = BTRFS_BALANCE_OBJECTID;
3059         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3060         key.offset = 0;
3061
3062         ret = btrfs_insert_empty_item(trans, root, path, &key,
3063                                       sizeof(*item));
3064         if (ret)
3065                 goto out;
3066
3067         leaf = path->nodes[0];
3068         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3069
3070         memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3071
3072         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3073         btrfs_set_balance_data(leaf, item, &disk_bargs);
3074         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3075         btrfs_set_balance_meta(leaf, item, &disk_bargs);
3076         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3077         btrfs_set_balance_sys(leaf, item, &disk_bargs);
3078
3079         btrfs_set_balance_flags(leaf, item, bctl->flags);
3080
3081         btrfs_mark_buffer_dirty(leaf);
3082 out:
3083         btrfs_free_path(path);
3084         err = btrfs_commit_transaction(trans);
3085         if (err && !ret)
3086                 ret = err;
3087         return ret;
3088 }
3089
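/* Delete the on-disk balance item from the tree root. */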
3090 static int del_balance_item(struct btrfs_fs_info *fs_info)
3091 {
3092         struct btrfs_root *root = fs_info->tree_root;
3093         struct btrfs_trans_handle *trans;
3094         struct btrfs_path *path;
3095         struct btrfs_key key;
3096         int ret, err;
3097
3098         path = btrfs_alloc_path();
3099         if (!path)
3100                 return -ENOMEM;
3101
3102         trans = btrfs_start_transaction(root, 0);
3103         if (IS_ERR(trans)) {
3104                 btrfs_free_path(path);
3105                 return PTR_ERR(trans);
3106         }
3107
3108         key.objectid = BTRFS_BALANCE_OBJECTID;
3109         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3110         key.offset = 0;
3111
3112         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3113         if (ret < 0)
3114                 goto out;
3115         if (ret > 0) {
3116                 ret = -ENOENT;
3117                 goto out;
3118         }
3119
3120         ret = btrfs_del_item(trans, root, path);
3121 out:
3122         btrfs_free_path(path);
3123         err = btrfs_commit_transaction(trans);
3124         if (err && !ret)
3125                 ret = err;
3126         return ret;
3127 }
3128
3129 /*
3130  * This is a heuristic used to reduce the number of chunks balanced on
3131  * resume after balance was interrupted.
3132  */
3133 static void update_balance_args(struct btrfs_balance_control *bctl)
3134 {
3135         /*
3136          * Turn on soft mode for chunk types that were being converted.
3137          */
3138         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3139                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3140         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3141                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3142         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3143                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3144
3145         /*
3146          * Turn on the usage filter if it is not already in use.  The idea is
3147          * that chunks that we have already balanced should be
3148          * reasonably full.  Don't do it for chunks that are being
3149          * converted - that will keep us from relocating unconverted
3150          * (albeit full) chunks.
3151          */
3152         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3153             !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3154             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3155                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3156                 bctl->data.usage = 90;
3157         }
3158         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3159             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3160             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3161                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3162                 bctl->sys.usage = 90;
3163         }
3164         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3165             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3166             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3167                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3168                 bctl->meta.usage = 90;
3169         }
3170 }
3171
3172 /*
3173  * Clear the balance status in fs_info and delete the balance item from disk.
3174  */
3175 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3176 {
3177         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3178         int ret;
3179
3180         BUG_ON(!fs_info->balance_ctl);
3181
3182         spin_lock(&fs_info->balance_lock);
3183         fs_info->balance_ctl = NULL;
3184         spin_unlock(&fs_info->balance_lock);
3185
3186         kfree(bctl);
3187         ret = del_balance_item(fs_info);
3188         if (ret)
3189                 btrfs_handle_fs_error(fs_info, ret, NULL);
3190 }
3191
3192 /*
3193  * Balance filters.  Return 1 if chunk should be filtered out
3194  * (should not be balanced).
3195  */
3196 static int chunk_profiles_filter(u64 chunk_type,
3197                                  struct btrfs_balance_args *bargs)
3198 {
3199         chunk_type = chunk_to_extended(chunk_type) &
3200                                 BTRFS_EXTENDED_PROFILE_MASK;
3201
3202         if (bargs->profiles & chunk_type)
3203                 return 0;
3204
3205         return 1;
3206 }
3207
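/*
 * Usage filter with an explicit range: keep the chunk (return 0) only if its
 * used bytes fall within [usage_min%, usage_max%) of the chunk size.
 */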
3208 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3209                               struct btrfs_balance_args *bargs)
3210 {
3211         struct btrfs_block_group_cache *cache;
3212         u64 chunk_used;
3213         u64 user_thresh_min;
3214         u64 user_thresh_max;
3215         int ret = 1;
3216
3217         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3218         chunk_used = btrfs_block_group_used(&cache->item);
3219
3220         if (bargs->usage_min == 0)
3221                 user_thresh_min = 0;
3222         else
3223                 user_thresh_min = div_factor_fine(cache->key.offset,
3224                                         bargs->usage_min);
3225
3226         if (bargs->usage_max == 0)
3227                 user_thresh_max = 1;
3228         else if (bargs->usage_max > 100)
3229                 user_thresh_max = cache->key.offset;
3230         else
3231                 user_thresh_max = div_factor_fine(cache->key.offset,
3232                                         bargs->usage_max);
3233
3234         if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3235                 ret = 0;
3236
3237         btrfs_put_block_group(cache);
3238         return ret;
3239 }
3240
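/*
 * Single threshold usage filter: keep the chunk (return 0) only if its used
 * bytes are below the usage threshold.
 */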
3241 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3242                 u64 chunk_offset, struct btrfs_balance_args *bargs)
3243 {
3244         struct btrfs_block_group_cache *cache;
3245         u64 chunk_used, user_thresh;
3246         int ret = 1;
3247
3248         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3249         chunk_used = btrfs_block_group_used(&cache->item);
3250
3251         if (bargs->usage_min == 0)
3252                 user_thresh = 1;
3253         else if (bargs->usage > 100)
3254                 user_thresh = cache->key.offset;
3255         else
3256                 user_thresh = div_factor_fine(cache->key.offset,
3257                                               bargs->usage);
3258
3259         if (chunk_used < user_thresh)
3260                 ret = 0;
3261
3262         btrfs_put_block_group(cache);
3263         return ret;
3264 }
3265
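/* Keep the chunk (return 0) only if one of its stripes is on bargs->devid. */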
3266 static int chunk_devid_filter(struct extent_buffer *leaf,
3267                               struct btrfs_chunk *chunk,
3268                               struct btrfs_balance_args *bargs)
3269 {
3270         struct btrfs_stripe *stripe;
3271         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3272         int i;
3273
3274         for (i = 0; i < num_stripes; i++) {
3275                 stripe = btrfs_stripe_nr(chunk, i);
3276                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3277                         return 0;
3278         }
3279
3280         return 1;
3281 }
3282
3283 /* [pstart, pend) */
3284 static int chunk_drange_filter(struct extent_buffer *leaf,
3285                                struct btrfs_chunk *chunk,
3286                                struct btrfs_balance_args *bargs)
3287 {
3288         struct btrfs_stripe *stripe;
3289         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3290         u64 stripe_offset;
3291         u64 stripe_length;
3292         int factor;
3293         int i;
3294
3295         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3296                 return 0;
3297
3298         if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
3299              BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
3300                 factor = num_stripes / 2;
3301         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
3302                 factor = num_stripes - 1;
3303         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
3304                 factor = num_stripes - 2;
3305         } else {
3306                 factor = num_stripes;
3307         }
3308
3309         for (i = 0; i < num_stripes; i++) {
3310                 stripe = btrfs_stripe_nr(chunk, i);
3311                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3312                         continue;
3313
3314                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3315                 stripe_length = btrfs_chunk_length(leaf, chunk);
3316                 stripe_length = div_u64(stripe_length, factor);
3317
3318                 if (stripe_offset < bargs->pend &&
3319                     stripe_offset + stripe_length > bargs->pstart)
3320                         return 0;
3321         }
3322
3323         return 1;
3324 }
3325
3326 /* [vstart, vend) */
3327 static int chunk_vrange_filter(struct extent_buffer *leaf,
3328                                struct btrfs_chunk *chunk,
3329                                u64 chunk_offset,
3330                                struct btrfs_balance_args *bargs)
3331 {
3332         if (chunk_offset < bargs->vend &&
3333             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3334                 /* at least part of the chunk is inside this vrange */
3335                 return 0;
3336
3337         return 1;
3338 }
3339
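/*
 * Keep the chunk (return 0) only if its stripe count lies within
 * [stripes_min, stripes_max].
 */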
3340 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3341                                struct btrfs_chunk *chunk,
3342                                struct btrfs_balance_args *bargs)
3343 {
3344         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3345
3346         if (bargs->stripes_min <= num_stripes
3347                         && num_stripes <= bargs->stripes_max)
3348                 return 0;
3349
3350         return 1;
3351 }
3352
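/*
 * Soft convert filter: when converting profiles, skip (return 1) chunks that
 * already have the target profile.
 */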
3353 static int chunk_soft_convert_filter(u64 chunk_type,
3354                                      struct btrfs_balance_args *bargs)
3355 {
3356         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3357                 return 0;
3358
3359         chunk_type = chunk_to_extended(chunk_type) &
3360                                 BTRFS_EXTENDED_PROFILE_MASK;
3361
3362         if (bargs->target == chunk_type)
3363                 return 1;
3364
3365         return 0;
3366 }
3367
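/*
 * Apply all configured balance filters to a single chunk.  Returns 1 if the
 * chunk should be balanced, 0 if it is filtered out.
 */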
3368 static int should_balance_chunk(struct btrfs_fs_info *fs_info,
3369                                 struct extent_buffer *leaf,
3370                                 struct btrfs_chunk *chunk, u64 chunk_offset)
3371 {
3372         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3373         struct btrfs_balance_args *bargs = NULL;
3374         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3375
3376         /* type filter */
3377         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3378               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3379                 return 0;
3380         }
3381
3382         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3383                 bargs = &bctl->data;
3384         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3385                 bargs = &bctl->sys;
3386         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3387                 bargs = &bctl->meta;
3388
3389         /* profiles filter */
3390         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3391             chunk_profiles_filter(chunk_type, bargs)) {
3392                 return 0;
3393         }
3394
3395         /* usage filter */
3396         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3397             chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3398                 return 0;
3399         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3400             chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3401                 return 0;
3402         }
3403
3404         /* devid filter */
3405         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3406             chunk_devid_filter(leaf, chunk, bargs)) {
3407                 return 0;
3408         }
3409
3410         /* drange filter, makes sense only with devid filter */
3411         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3412             chunk_drange_filter(leaf, chunk, bargs)) {
3413                 return 0;
3414         }
3415
3416         /* vrange filter */
3417         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3418             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3419                 return 0;
3420         }
3421
3422         /* stripes filter */
3423         if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3424             chunk_stripes_range_filter(leaf, chunk, bargs)) {
3425                 return 0;
3426         }
3427
3428         /* soft profile changing mode */
3429         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3430             chunk_soft_convert_filter(chunk_type, bargs)) {
3431                 return 0;
3432         }
3433
3434         /*
3435          * Limited by count; this must be the last filter.
3436          */
3437         if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3438                 if (bargs->limit == 0)
3439                         return 0;
3440                 else
3441                         bargs->limit--;
3442         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3443                 /*
3444                  * Same logic as the 'limit' filter; the minimum cannot be
3445                  * determined here because we do not have the global information
3446                  * about the count of all chunks that satisfy the filters.
3447                  */
3448                 if (bargs->limit_max == 0)
3449                         return 0;
3450                 else
3451                         bargs->limit_max--;
3452         }
3453
3454         return 1;
3455 }
3456
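/*
 * The main balance loop.  First make some room on every device by shrinking
 * and re-growing it, then run two passes over the chunk tree: a counting
 * pass that records the expected amount of work and a second pass that
 * relocates every chunk matching the filters.
 */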
3457 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3458 {
3459         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3460         struct btrfs_root *chunk_root = fs_info->chunk_root;
3461         struct btrfs_root *dev_root = fs_info->dev_root;
3462         struct list_head *devices;
3463         struct btrfs_device *device;
3464         u64 old_size;
3465         u64 size_to_free;
3466         u64 chunk_type;
3467         struct btrfs_chunk *chunk;
3468         struct btrfs_path *path = NULL;
3469         struct btrfs_key key;
3470         struct btrfs_key found_key;
3471         struct btrfs_trans_handle *trans;
3472         struct extent_buffer *leaf;
3473         int slot;
3474         int ret;
3475         int enospc_errors = 0;
3476         bool counting = true;
3477         /* The single value limit and min/max limits share bytes in the balance args union; stash them and restore after the counting pass */
3478         u64 limit_data = bctl->data.limit;
3479         u64 limit_meta = bctl->meta.limit;
3480         u64 limit_sys = bctl->sys.limit;
3481         u32 count_data = 0;
3482         u32 count_meta = 0;
3483         u32 count_sys = 0;
3484         int chunk_reserved = 0;
3485
3486         /* step one, make some room on all the devices */
3487         devices = &fs_info->fs_devices->devices;
3488         list_for_each_entry(device, devices, dev_list) {
3489                 old_size = btrfs_device_get_total_bytes(device);
3490                 size_to_free = div_factor(old_size, 1);
3491                 size_to_free = min_t(u64, size_to_free, SZ_1M);
3492                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) ||
3493                     btrfs_device_get_total_bytes(device) -
3494                     btrfs_device_get_bytes_used(device) > size_to_free ||
3495                     test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
3496                         continue;
3497
3498                 ret = btrfs_shrink_device(device, old_size - size_to_free);
3499                 if (ret == -ENOSPC)
3500                         break;
3501                 if (ret) {
3502                         /* btrfs_shrink_device never returns ret > 0 */
3503                         WARN_ON(ret > 0);
3504                         goto error;
3505                 }
3506
3507                 trans = btrfs_start_transaction(dev_root, 0);
3508                 if (IS_ERR(trans)) {
3509                         ret = PTR_ERR(trans);
3510                         btrfs_info_in_rcu(fs_info,
3511                  "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
3512                                           rcu_str_deref(device->name), ret,
3513                                           old_size, old_size - size_to_free);
3514                         goto error;
3515                 }
3516
3517                 ret = btrfs_grow_device(trans, device, old_size);
3518                 if (ret) {
3519                         btrfs_end_transaction(trans);
3520                         /* btrfs_grow_device never returns ret > 0 */
3521                         WARN_ON(ret > 0);
3522                         btrfs_info_in_rcu(fs_info,
3523                  "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
3524                                           rcu_str_deref(device->name), ret,
3525                                           old_size, old_size - size_to_free);
3526                         goto error;
3527                 }
3528
3529                 btrfs_end_transaction(trans);
3530         }
3531
3532         /* step two, relocate all the chunks */
3533         path = btrfs_alloc_path();
3534         if (!path) {
3535                 ret = -ENOMEM;
3536                 goto error;
3537         }
3538
3539         /* zero out stat counters */
3540         spin_lock(&fs_info->balance_lock);
3541         memset(&bctl->stat, 0, sizeof(bctl->stat));
3542         spin_unlock(&fs_info->balance_lock);
3543 again:
3544         if (!counting) {
3545                 /*
3546                  * The single value limit and min/max limits use the same bytes
3547                  * in the balance args union, so restore the saved limits here.
3548                  */
3549                 bctl->data.limit = limit_data;
3550                 bctl->meta.limit = limit_meta;
3551                 bctl->sys.limit = limit_sys;
3552         }
3553         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3554         key.offset = (u64)-1;
3555         key.type = BTRFS_CHUNK_ITEM_KEY;
3556
3557         while (1) {
3558                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3559                     atomic_read(&fs_info->balance_cancel_req)) {
3560                         ret = -ECANCELED;
3561                         goto error;
3562                 }
3563
3564                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3565                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3566                 if (ret < 0) {
3567                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3568                         goto error;
3569                 }
3570
3571                 /*
3572                  * This shouldn't happen; it means the last relocation
3573                  * failed.
3574                  */
3575                 if (ret == 0)
3576                         BUG(); /* FIXME break ? */
3577
3578                 ret = btrfs_previous_item(chunk_root, path, 0,
3579                                           BTRFS_CHUNK_ITEM_KEY);
3580                 if (ret) {
3581                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3582                         ret = 0;
3583                         break;
3584                 }
3585
3586                 leaf = path->nodes[0];
3587                 slot = path->slots[0];
3588                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3589
3590                 if (found_key.objectid != key.objectid) {
3591                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3592                         break;
3593                 }
3594
3595                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3596                 chunk_type = btrfs_chunk_type(leaf, chunk);
3597
3598                 if (!counting) {
3599                         spin_lock(&fs_info->balance_lock);
3600                         bctl->stat.considered++;
3601                         spin_unlock(&fs_info->balance_lock);
3602                 }
3603
3604                 ret = should_balance_chunk(fs_info, leaf, chunk,
3605                                            found_key.offset);
3606
3607                 btrfs_release_path(path);
3608                 if (!ret) {
3609                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3610                         goto loop;
3611                 }
3612
3613                 if (counting) {
3614                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3615                         spin_lock(&fs_info->balance_lock);
3616                         bctl->stat.expected++;
3617                         spin_unlock(&fs_info->balance_lock);
3618
3619                         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3620                                 count_data++;
3621                         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3622                                 count_sys++;
3623                         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3624                                 count_meta++;
3625
3626                         goto loop;
3627                 }
3628
3629                 /*
3630                  * Apply limit_min filter, no need to check if the LIMITS
3631                  * filter is used, limit_min is 0 by default
3632                  */
3633                 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3634                                         count_data < bctl->data.limit_min)
3635                                 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3636                                         count_meta < bctl->meta.limit_min)
3637                                 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3638                                         count_sys < bctl->sys.limit_min)) {
3639                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3640                         goto loop;
3641                 }
3642
3643                 if (!chunk_reserved) {
3644                         /*
3645                          * We may be relocating the only data chunk we have,
3646                          * which could potentially end up losing the data raid
3647                          * profile, so let's allocate an empty one in
3648                          * advance.
3649                          */
3650                         ret = btrfs_may_alloc_data_chunk(fs_info,
3651                                                          found_key.offset);
3652                         if (ret < 0) {
3653                                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3654                                 goto error;
3655                         } else if (ret == 1) {
3656                                 chunk_reserved = 1;
3657                         }
3658                 }
3659
3660                 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3661                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3662                 if (ret && ret != -ENOSPC)
3663                         goto error;
3664                 if (ret == -ENOSPC) {
3665                         enospc_errors++;
3666                 } else {
3667                         spin_lock(&fs_info->balance_lock);
3668                         bctl->stat.completed++;
3669                         spin_unlock(&fs_info->balance_lock);
3670                 }
3671 loop:
3672                 if (found_key.offset == 0)
3673                         break;
3674                 key.offset = found_key.offset - 1;
3675         }
3676
3677         if (counting) {
3678                 btrfs_release_path(path);
3679                 counting = false;
3680                 goto again;
3681         }
3682 error:
3683         btrfs_free_path(path);
3684         if (enospc_errors) {
3685                 btrfs_info(fs_info, "%d enospc errors during balance",
3686                            enospc_errors);
3687                 if (!ret)
3688                         ret = -ENOSPC;
3689         }
3690
3691         return ret;
3692 }
3693
3694 /**
3695  * alloc_profile_is_valid - see if a given profile is valid and reduced
3696  * @flags: profile to validate
3697  * @extended: if true @flags is treated as an extended profile
3698  */
3699 static int alloc_profile_is_valid(u64 flags, int extended)
3700 {
3701         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3702                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3703
3704         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3705
3706         /* 1) check that all other bits are zeroed */
3707         if (flags & ~mask)
3708                 return 0;
3709
3710         /* 2) see if profile is reduced */
3711         if (flags == 0)
3712                 return !extended; /* "0" is valid for usual profiles */
3713
3714         /* true if exactly one bit set */
3715         return (flags & (flags - 1)) == 0;
3716 }
3717
3718 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3719 {
3720         /* cancel requested || normal exit path */
3721         return atomic_read(&fs_info->balance_cancel_req) ||
3722                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3723                  atomic_read(&fs_info->balance_cancel_req) == 0);
3724 }
3725
3726 /* Non-zero return value signifies invalidity */
3727 static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
3728                 u64 allowed)
3729 {
3730         return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3731                 (!alloc_profile_is_valid(bctl_arg->target, 1) ||
3732                  (bctl_arg->target & ~allowed)));
3733 }
3734
3735 /*
3736  * Should be called with the balance mutex held.
3737  */
3738 int btrfs_balance(struct btrfs_fs_info *fs_info,
3739                   struct btrfs_balance_control *bctl,
3740                   struct btrfs_ioctl_balance_args *bargs)
3741 {
3742         u64 meta_target, data_target;
3743         u64 allowed;
3744         int mixed = 0;
3745         int ret;
3746         u64 num_devices;
3747         unsigned seq;
3748         bool reducing_integrity;
3749
3750         if (btrfs_fs_closing(fs_info) ||
3751             atomic_read(&fs_info->balance_pause_req) ||
3752             atomic_read(&fs_info->balance_cancel_req)) {
3753                 ret = -EINVAL;
3754                 goto out;
3755         }
3756
3757         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3758         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3759                 mixed = 1;
3760
3761         /*
3762          * In case of mixed groups both data and meta should be picked,
3763          * and identical options should be given for both of them.
3764          */
3765         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3766         if (mixed && (bctl->flags & allowed)) {
3767                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3768                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3769                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3770                         btrfs_err(fs_info,
3771           "balance: mixed groups data and metadata options must be the same");
3772                         ret = -EINVAL;
3773                         goto out;
3774                 }
3775         }
3776
3777         num_devices = fs_info->fs_devices->num_devices;
3778         btrfs_dev_replace_read_lock(&fs_info->dev_replace);
3779         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3780                 BUG_ON(num_devices < 1);
3781                 num_devices--;
3782         }
3783         btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
3784         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
3785         if (num_devices > 1)
3786                 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3787         if (num_devices > 2)
3788                 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3789         if (num_devices > 3)
3790                 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3791                             BTRFS_BLOCK_GROUP_RAID6);
3792         if (validate_convert_profile(&bctl->data, allowed)) {
3793                 int index = btrfs_bg_flags_to_raid_index(bctl->data.target);
3794
3795                 btrfs_err(fs_info,
3796                           "balance: invalid convert data profile %s",
3797                           get_raid_name(index));
3798                 ret = -EINVAL;
3799                 goto out;
3800         }
3801         if (validate_convert_profile(&bctl->meta, allowed)) {
3802                 int index = btrfs_bg_flags_to_raid_index(bctl->meta.target);
3803
3804                 btrfs_err(fs_info,
3805                           "balance: invalid convert metadata profile %s",
3806                           get_raid_name(index));
3807                 ret = -EINVAL;
3808                 goto out;
3809         }
3810         if (validate_convert_profile(&bctl->sys, allowed)) {
3811                 int index = btrfs_bg_flags_to_raid_index(bctl->sys.target);
3812
3813                 btrfs_err(fs_info,
3814                           "balance: invalid convert system profile %s",
3815                           get_raid_name(index));
3816                 ret = -EINVAL;
3817                 goto out;
3818         }
3819
3820         /* allow to reduce meta or sys integrity only if force set */
3821         allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3822                         BTRFS_BLOCK_GROUP_RAID10 |
3823                         BTRFS_BLOCK_GROUP_RAID5 |
3824                         BTRFS_BLOCK_GROUP_RAID6;
3825         do {
3826                 seq = read_seqbegin(&fs_info->profiles_lock);
3827
3828                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3829                      (fs_info->avail_system_alloc_bits & allowed) &&
3830                      !(bctl->sys.target & allowed)) ||
3831                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3832                      (fs_info->avail_metadata_alloc_bits & allowed) &&
3833                      !(bctl->meta.target & allowed)))
3834                         reducing_integrity = true;
3835                 else
3836                         reducing_integrity = false;
3837
3838                 /* if we're not converting, the target field is uninitialized */
3839                 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3840                         bctl->meta.target : fs_info->avail_metadata_alloc_bits;
3841                 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
3842                         bctl->data.target : fs_info->avail_data_alloc_bits;
3843         } while (read_seqretry(&fs_info->profiles_lock, seq));
3844
3845         if (reducing_integrity) {
3846                 if (bctl->flags & BTRFS_BALANCE_FORCE) {
3847                         btrfs_info(fs_info,
3848                                    "balance: force reducing metadata integrity");
3849                 } else {
3850                         btrfs_err(fs_info,
3851           "balance: reduces metadata integrity, use --force if you want this");
3852                         ret = -EINVAL;
3853                         goto out;
3854                 }
3855         }
3856
3857         if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
3858                 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
3859                 int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
3860                 int data_index = btrfs_bg_flags_to_raid_index(data_target);
3861
3862                 btrfs_warn(fs_info,
3863         "balance: metadata profile %s has lower redundancy than data profile %s",
3864                            get_raid_name(meta_index), get_raid_name(data_index));
3865         }
3866
3867         ret = insert_balance_item(fs_info, bctl);
3868         if (ret && ret != -EEXIST)
3869                 goto out;
3870
3871         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3872                 BUG_ON(ret == -EEXIST);
3873                 BUG_ON(fs_info->balance_ctl);
3874                 spin_lock(&fs_info->balance_lock);
3875                 fs_info->balance_ctl = bctl;
3876                 spin_unlock(&fs_info->balance_lock);
3877         } else {
3878                 BUG_ON(ret != -EEXIST);
3879                 spin_lock(&fs_info->balance_lock);
3880                 update_balance_args(bctl);
3881                 spin_unlock(&fs_info->balance_lock);
3882         }
3883
3884         ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
3885         set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
3886         mutex_unlock(&fs_info->balance_mutex);
3887
3888         ret = __btrfs_balance(fs_info);
3889
3890         mutex_lock(&fs_info->balance_mutex);
3891         clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
3892
3893         if (bargs) {
3894                 memset(bargs, 0, sizeof(*bargs));
3895                 btrfs_update_ioctl_balance_args(fs_info, bargs);
3896         }
3897
3898         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3899             balance_need_close(fs_info)) {
3900                 reset_balance_state(fs_info);
3901                 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3902         }
3903
3904         wake_up(&fs_info->balance_wait_q);
3905
3906         return ret;
3907 out:
3908         if (bctl->flags & BTRFS_BALANCE_RESUME)
3909                 reset_balance_state(fs_info);
3910         else
3911                 kfree(bctl);
3912         clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
3913
3914         return ret;
3915 }
3916
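/* Kthread that resumes a previously paused or interrupted balance, if any. */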
3917 static int balance_kthread(void *data)
3918 {
3919         struct btrfs_fs_info *fs_info = data;
3920         int ret = 0;
3921
3922         mutex_lock(&fs_info->balance_mutex);
3923         if (fs_info->balance_ctl) {
3924                 btrfs_info(fs_info, "balance: resuming");
3925                 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
3926         }
3927         mutex_unlock(&fs_info->balance_mutex);
3928
3929         return ret;
3930 }
3931
3932 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3933 {
3934         struct task_struct *tsk;
3935
3936         mutex_lock(&fs_info->balance_mutex);
3937         if (!fs_info->balance_ctl) {
3938                 mutex_unlock(&fs_info->balance_mutex);
3939                 return 0;
3940         }
3941         mutex_unlock(&fs_info->balance_mutex);
3942
3943         if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
3944                 btrfs_info(fs_info, "balance: resume skipped");
3945                 return 0;
3946         }
3947
3948         /*
3949          * A ro->rw remount sequence should continue with the paused balance
3950          * regardless of who paused it, the system or the user, so set
3951          * the resume flag.
3952          */
3953         spin_lock(&fs_info->balance_lock);
3954         fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
3955         spin_unlock(&fs_info->balance_lock);
3956
3957         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3958         return PTR_ERR_OR_ZERO(tsk);
3959 }
3960
3961 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3962 {
3963         struct btrfs_balance_control *bctl;
3964         struct btrfs_balance_item *item;
3965         struct btrfs_disk_balance_args disk_bargs;
3966         struct btrfs_path *path;
3967         struct extent_buffer *leaf;
3968         struct btrfs_key key;
3969         int ret;
3970
3971         path = btrfs_alloc_path();
3972         if (!path)
3973                 return -ENOMEM;
3974
3975         key.objectid = BTRFS_BALANCE_OBJECTID;
3976         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3977         key.offset = 0;
3978
3979         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3980         if (ret < 0)
3981                 goto out;
3982         if (ret > 0) { /* ret = -ENOENT; */
3983                 ret = 0;
3984                 goto out;
3985         }
3986
3987         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3988         if (!bctl) {
3989                 ret = -ENOMEM;
3990                 goto out;
3991         }
3992
3993         leaf = path->nodes[0];
3994         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3995
3996         bctl->flags = btrfs_balance_flags(leaf, item);
3997         bctl->flags |= BTRFS_BALANCE_RESUME;
3998
3999         btrfs_balance_data(leaf, item, &disk_bargs);
4000         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4001         btrfs_balance_meta(leaf, item, &disk_bargs);
4002         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4003         btrfs_balance_sys(leaf, item, &disk_bargs);
4004         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4005
4006         /*
4007          * This should never happen, as the paused balance state is recovered
4008          * during mount without any chance of other exclusive ops to collide.
4009          *
4010          * This gives the exclusive op status to balance and keeps in paused
4011          * state until user intervention (cancel or umount). If the ownership
4012          * cannot be assigned, show a message but do not fail. The balance
4013          * is in a paused state and must have fs_info::balance_ctl properly
4014          * set up.
4015          */
4016         if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
4017                 btrfs_warn(fs_info,
4018         "balance: cannot set exclusive op status, resume manually");
4019
4020         btrfs_release_path(path);
4021
4022         mutex_lock(&fs_info->balance_mutex);
4023         BUG_ON(fs_info->balance_ctl);
4024         spin_lock(&fs_info->balance_lock);
4025         fs_info->balance_ctl = bctl;
4026         spin_unlock(&fs_info->balance_lock);
4027         mutex_unlock(&fs_info->balance_mutex);
4028 out:
4029         btrfs_free_path(path);
4030         return ret;
4031 }
4032
4033 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4034 {
4035         int ret = 0;
4036
4037         mutex_lock(&fs_info->balance_mutex);
4038         if (!fs_info->balance_ctl) {
4039                 mutex_unlock(&fs_info->balance_mutex);
4040                 return -ENOTCONN;
4041         }
4042
4043         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4044                 atomic_inc(&fs_info->balance_pause_req);
4045                 mutex_unlock(&fs_info->balance_mutex);
4046
4047                 wait_event(fs_info->balance_wait_q,
4048                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4049
4050                 mutex_lock(&fs_info->balance_mutex);
4051                 /* we are good with balance_ctl ripped off from under us */
4052                 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4053                 atomic_dec(&fs_info->balance_pause_req);
4054         } else {
4055                 ret = -ENOTCONN;
4056         }
4057
4058         mutex_unlock(&fs_info->balance_mutex);
4059         return ret;
4060 }
4061
4062 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4063 {
4064         mutex_lock(&fs_info->balance_mutex);
4065         if (!fs_info->balance_ctl) {
4066                 mutex_unlock(&fs_info->balance_mutex);
4067                 return -ENOTCONN;
4068         }
4069
4070         /*
4071          * A paused balance with the item stored on disk can be resumed at
4072          * mount time if the mount is read-write. Otherwise it's still paused
4073          * and we must not allow cancelling as it deletes the item.
4074          */
4075         if (sb_rdonly(fs_info->sb)) {
4076                 mutex_unlock(&fs_info->balance_mutex);
4077                 return -EROFS;
4078         }
4079
4080         atomic_inc(&fs_info->balance_cancel_req);
4081         /*
4082          * If a balance is running, just wait and return; the balance item
4083          * is deleted in btrfs_balance() in this case.
4084          */
4085         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4086                 mutex_unlock(&fs_info->balance_mutex);
4087                 wait_event(fs_info->balance_wait_q,
4088                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4089                 mutex_lock(&fs_info->balance_mutex);
4090         } else {
4091                 mutex_unlock(&fs_info->balance_mutex);
4092                 /*
4093                  * Lock released to allow other waiters to continue; we'll
4094                  * re-examine the status after reacquiring the mutex.
4095                  */
4096                 mutex_lock(&fs_info->balance_mutex);
4097
4098                 if (fs_info->balance_ctl) {
4099                         reset_balance_state(fs_info);
4100                         clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
4101                         btrfs_info(fs_info, "balance: canceled");
4102                 }
4103         }
4104
4105         BUG_ON(fs_info->balance_ctl ||
4106                 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4107         atomic_dec(&fs_info->balance_cancel_req);
4108         mutex_unlock(&fs_info->balance_mutex);
4109         return 0;
4110 }
4111
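/*
 * Scan all root items and add any missing subvolume UUID and received UUID
 * entries to the UUID tree.  Releases uuid_tree_rescan_sem when finished.
 */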
4112 static int btrfs_uuid_scan_kthread(void *data)
4113 {
4114         struct btrfs_fs_info *fs_info = data;
4115         struct btrfs_root *root = fs_info->tree_root;
4116         struct btrfs_key key;
4117         struct btrfs_path *path = NULL;
4118         int ret = 0;
4119         struct extent_buffer *eb;
4120         int slot;
4121         struct btrfs_root_item root_item;
4122         u32 item_size;
4123         struct btrfs_trans_handle *trans = NULL;
4124
4125         path = btrfs_alloc_path();
4126         if (!path) {
4127                 ret = -ENOMEM;
4128                 goto out;
4129         }
4130
4131         key.objectid = 0;
4132         key.type = BTRFS_ROOT_ITEM_KEY;
4133         key.offset = 0;
4134
4135         while (1) {
4136                 ret = btrfs_search_forward(root, &key, path,
4137                                 BTRFS_OLDEST_GENERATION);
4138                 if (ret) {
4139                         if (ret > 0)
4140                                 ret = 0;
4141                         break;
4142                 }
4143
4144                 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4145                     (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4146                      key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4147                     key.objectid > BTRFS_LAST_FREE_OBJECTID)
4148                         goto skip;
4149
4150                 eb = path->nodes[0];
4151                 slot = path->slots[0];
4152                 item_size = btrfs_item_size_nr(eb, slot);
4153                 if (item_size < sizeof(root_item))
4154                         goto skip;
4155
4156                 read_extent_buffer(eb, &root_item,
4157                                    btrfs_item_ptr_offset(eb, slot),
4158                                    (int)sizeof(root_item));
4159                 if (btrfs_root_refs(&root_item) == 0)
4160                         goto skip;
4161
4162                 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4163                     !btrfs_is_empty_uuid(root_item.received_uuid)) {
4164                         if (trans)
4165                                 goto update_tree;
4166
4167                         btrfs_release_path(path);
4168                         /*
4169                          * 1 - subvol uuid item
4170                          * 1 - received_subvol uuid item
4171                          */
4172                         trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4173                         if (IS_ERR(trans)) {
4174                                 ret = PTR_ERR(trans);
4175                                 break;
4176                         }
4177                         continue;
4178                 } else {
4179                         goto skip;
4180                 }
4181 update_tree:
4182                 btrfs_release_path(path);
4183                 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4184                         ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4185                                                   BTRFS_UUID_KEY_SUBVOL,
4186                                                   key.objectid);
4187                         if (ret < 0) {
4188                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4189                                         ret);
4190                                 break;
4191                         }
4192                 }
4193
4194                 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4195                         ret = btrfs_uuid_tree_add(trans,
4196                                                   root_item.received_uuid,
4197                                                  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4198                                                   key.objectid);
4199                         if (ret < 0) {
4200                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4201                                         ret);
4202                                 break;
4203                         }
4204                 }
4205
4206 skip:
4207                 btrfs_release_path(path);
4208                 if (trans) {
4209                         ret = btrfs_end_transaction(trans);
4210                         trans = NULL;
4211                         if (ret)
4212                                 break;
4213                 }
4214
4215                 if (key.offset < (u64)-1) {
4216                         key.offset++;
4217                 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4218                         key.offset = 0;
4219                         key.type = BTRFS_ROOT_ITEM_KEY;
4220                 } else if (key.objectid < (u64)-1) {
4221                         key.offset = 0;
4222                         key.type = BTRFS_ROOT_ITEM_KEY;
4223                         key.objectid++;
4224                 } else {
4225                         break;
4226                 }
4227                 cond_resched();
4228         }
4229
4230 out:
4231         btrfs_free_path(path);
4232         if (trans && !IS_ERR(trans))
4233                 btrfs_end_transaction(trans);
4234         if (ret)
4235                 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4236         else
4237                 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4238         up(&fs_info->uuid_tree_rescan_sem);
4239         return 0;
4240 }
4241
4242 /*
4243  * Callback for btrfs_uuid_tree_iterate().
4244  * Returns:
4245  * 0    if the check succeeded; the entry is not outdated.
4246  * < 0  if an error occurred.
4247  * > 0  if the check failed; the caller shall remove the entry.
4248  */
4249 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
4250                                        u8 *uuid, u8 type, u64 subid)
4251 {
4252         struct btrfs_key key;
4253         int ret = 0;
4254         struct btrfs_root *subvol_root;
4255
4256         if (type != BTRFS_UUID_KEY_SUBVOL &&
4257             type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
4258                 goto out;
4259
4260         key.objectid = subid;
4261         key.type = BTRFS_ROOT_ITEM_KEY;
4262         key.offset = (u64)-1;
4263         subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
4264         if (IS_ERR(subvol_root)) {
4265                 ret = PTR_ERR(subvol_root);
4266                 if (ret == -ENOENT)
4267                         ret = 1;
4268                 goto out;
4269         }
4270
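             /*
              * The entry is outdated if the UUID stored in the tree no
              * longer matches the one recorded in the subvolume's root item.
              */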
4271         switch (type) {
4272         case BTRFS_UUID_KEY_SUBVOL:
4273                 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
4274                         ret = 1;
4275                 break;
4276         case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
4277                 if (memcmp(uuid, subvol_root->root_item.received_uuid,
4278                            BTRFS_UUID_SIZE))
4279                         ret = 1;
4280                 break;
4281         }
4282
4283 out:
4284         return ret;
4285 }
4286
4287 static int btrfs_uuid_rescan_kthread(void *data)
4288 {
4289         struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
4290         int ret;
4291
4292         /*
4293          * 1st step is to iterate through the existing UUID tree and
4294          * to delete all entries that contain outdated data.
4295          * 2nd step is to add all missing entries to the UUID tree.
4296          */
4297         ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
4298         if (ret < 0) {
4299                 btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
4300                 up(&fs_info->uuid_tree_rescan_sem);
4301                 return ret;
4302         }
4303         return btrfs_uuid_scan_kthread(data);
4304 }
4305
4306 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4307 {
4308         struct btrfs_trans_handle *trans;
4309         struct btrfs_root *tree_root = fs_info->tree_root;
4310         struct btrfs_root *uuid_root;
4311         struct task_struct *task;
4312         int ret;
4313
4314         /*
4315          * 1 - root node
4316          * 1 - root item
4317          */
4318         trans = btrfs_start_transaction(tree_root, 2);
4319         if (IS_ERR(trans))
4320                 return PTR_ERR(trans);
4321
4322         uuid_root = btrfs_create_tree(trans, fs_info,
4323                                       BTRFS_UUID_TREE_OBJECTID);
4324         if (IS_ERR(uuid_root)) {
4325                 ret = PTR_ERR(uuid_root);
4326                 btrfs_abort_transaction(trans, ret);
4327                 btrfs_end_transaction(trans);
4328                 return ret;
4329         }
4330
4331         fs_info->uuid_root = uuid_root;
4332
4333         ret = btrfs_commit_transaction(trans);
4334         if (ret)
4335                 return ret;
4336
4337         down(&fs_info->uuid_tree_rescan_sem);
4338         task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4339         if (IS_ERR(task)) {
4340                 /* BTRFS_FS_UPDATE_UUID_TREE_GEN remains unset in all error cases */
4341                 btrfs_warn(fs_info, "failed to start uuid_scan task");
4342                 up(&fs_info->uuid_tree_rescan_sem);
4343                 return PTR_ERR(task);
4344         }
4345
4346         return 0;
4347 }
4348
4349 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
4350 {
4351         struct task_struct *task;
4352
4353         down(&fs_info->uuid_tree_rescan_sem);
4354         task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
4355         if (IS_ERR(task)) {
4356                 /* BTRFS_FS_UPDATE_UUID_TREE_GEN remains unset in all error cases */
4357                 btrfs_warn(fs_info, "failed to start uuid_rescan task");
4358                 up(&fs_info->uuid_tree_rescan_sem);
4359                 return PTR_ERR(task);
4360         }
4361
4362         return 0;
4363 }
4364
4365 /*
4366  * Shrinking a device means finding all of the device extents past
4367  * the new size, and then following the back refs to the chunks.
4368  * The chunk relocation code actually frees the device extents.
4369  */
4370 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4371 {
4372         struct btrfs_fs_info *fs_info = device->fs_info;
4373         struct btrfs_root *root = fs_info->dev_root;
4374         struct btrfs_trans_handle *trans;
4375         struct btrfs_dev_extent *dev_extent = NULL;
4376         struct btrfs_path *path;
4377         u64 length;
4378         u64 chunk_offset;
4379         int ret;
4380         int slot;
4381         int failed = 0;
4382         bool retried = false;
4383         bool checked_pending_chunks = false;
4384         struct extent_buffer *l;
4385         struct btrfs_key key;
4386         struct btrfs_super_block *super_copy = fs_info->super_copy;
4387         u64 old_total = btrfs_super_total_bytes(super_copy);
4388         u64 old_size = btrfs_device_get_total_bytes(device);
4389         u64 diff;
4390
4391         new_size = round_down(new_size, fs_info->sectorsize);
4392         diff = round_down(old_size - new_size, fs_info->sectorsize);
4393
4394         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4395                 return -EINVAL;
4396
4397         path = btrfs_alloc_path();
4398         if (!path)
4399                 return -ENOMEM;
4400
4401         path->reada = READA_BACK;
4402
4403         mutex_lock(&fs_info->chunk_mutex);
4404
4405         btrfs_device_set_total_bytes(device, new_size);
4406         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4407                 device->fs_devices->total_rw_bytes -= diff;
4408                 atomic64_sub(diff, &fs_info->free_chunk_space);
4409         }
4410         mutex_unlock(&fs_info->chunk_mutex);
4411
4412 again:
4413         key.objectid = device->devid;
4414         key.offset = (u64)-1;
4415         key.type = BTRFS_DEV_EXTENT_KEY;
4416
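             /*
              * Walk this device's extents from the highest offset downwards
              * and relocate every chunk whose device extent reaches beyond
              * new_size; the walk stops at the first extent that ends within
              * the new size.
              */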
4417         do {
4418                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
4419                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4420                 if (ret < 0) {
4421                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4422                         goto done;
4423                 }
4424
4425                 ret = btrfs_previous_item(root, path, 0, key.type);
4426                 if (ret)
4427                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4428                 if (ret < 0)
4429                         goto done;
4430                 if (ret) {
4431                         ret = 0;
4432                         btrfs_release_path(path);
4433                         break;
4434                 }
4435
4436                 l = path->nodes[0];
4437                 slot = path->slots[0];
4438                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4439
4440                 if (key.objectid != device->devid) {
4441                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4442                         btrfs_release_path(path);
4443                         break;
4444                 }
4445
4446                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4447                 length = btrfs_dev_extent_length(l, dev_extent);
4448
4449                 if (key.offset + length <= new_size) {
4450                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4451                         btrfs_release_path(path);
4452                         break;
4453                 }
4454
4455                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4456                 btrfs_release_path(path);
4457
4458                 /*
4459                  * We may be relocating the only data chunk we have,
4460                  * which could potentially end up losing the data's
4461                  * raid profile, so let's allocate an empty one in
4462                  * advance.
4463                  */
4464                 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4465                 if (ret < 0) {
4466                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4467                         goto done;
4468                 }
4469
4470                 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4471                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4472                 if (ret && ret != -ENOSPC)
4473                         goto done;
4474                 if (ret == -ENOSPC)
4475                         failed++;
4476         } while (key.offset-- > 0);
4477
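             /*
              * Chunks that failed to relocate with ENOSPC are retried once,
              * since the first pass may have freed enough space for them; a
              * second failure makes the shrink fail with ENOSPC.
              */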
4478         if (failed && !retried) {
4479                 failed = 0;
4480                 retried = true;
4481                 goto again;
4482         } else if (failed && retried) {
4483                 ret = -ENOSPC;
4484                 goto done;
4485         }
4486
4487         /* Shrinking succeeded, else we would be at "done". */
4488         trans = btrfs_start_transaction(root, 0);
4489         if (IS_ERR(trans)) {
4490                 ret = PTR_ERR(trans);
4491                 goto done;
4492         }
4493
4494         mutex_lock(&fs_info->chunk_mutex);
4495
4496         /*
4497          * We checked in the above loop all device extents that were already in
4498          * the device tree. However, before we have updated the device's
4499          * total_bytes to the new size, we might have had chunk allocations that
4500          * have not completed yet (new block groups attached to transaction
4501          * handles), and therefore their device extents were not yet in the
4502          * device tree and we missed them in the loop above. So if we have any
4503          * pending chunk using a device extent that overlaps the device range
4504          * that we cannot use anymore, commit the current transaction and
4505          * repeat the search on the device tree - this way we guarantee we will
4506          * not have chunks using device extents that end beyond 'new_size'.
4507          */
4508         if (!checked_pending_chunks) {
4509                 u64 start = new_size;
4510                 u64 len = old_size - new_size;
4511
4512                 if (contains_pending_extent(trans->transaction, device,
4513                                             &start, len)) {
4514                         mutex_unlock(&fs_info->chunk_mutex);
4515                         checked_pending_chunks = true;
4516                         failed = 0;
4517                         retried = false;
4518                         ret = btrfs_commit_transaction(trans);
4519                         if (ret)
4520                                 goto done;
4521                         goto again;
4522                 }
4523         }
4524
4525         btrfs_device_set_disk_total_bytes(device, new_size);
4526         if (list_empty(&device->resized_list))
4527                 list_add_tail(&device->resized_list,
4528                               &fs_info->fs_devices->resized_devices);
4529
4530         WARN_ON(diff > old_total);
4531         btrfs_set_super_total_bytes(super_copy,
4532                         round_down(old_total - diff, fs_info->sectorsize));
4533         mutex_unlock(&fs_info->chunk_mutex);
4534
4535         /* Now btrfs_update_device() will change the on-disk size. */
4536         ret = btrfs_update_device(trans, device);
4537         if (ret < 0) {
4538                 btrfs_abort_transaction(trans, ret);
4539                 btrfs_end_transaction(trans);
4540         } else {
4541                 ret = btrfs_commit_transaction(trans);
4542         }
4543 done:
4544         btrfs_free_path(path);
4545         if (ret) {
4546                 mutex_lock(&fs_info->chunk_mutex);
4547                 btrfs_device_set_total_bytes(device, old_size);
4548                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4549                         device->fs_devices->total_rw_bytes += diff;
4550                 atomic64_add(diff, &fs_info->free_chunk_space);
4551                 mutex_unlock(&fs_info->chunk_mutex);
4552         }
4553         return ret;
4554 }
4555
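     /*
      * Append the key and chunk item of a SYSTEM block group to the
      * superblock's sys_chunk_array.  This copy is what allows the chunk
      * tree itself to be located at mount time, before any tree can be read.
      */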
4556 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4557                            struct btrfs_key *key,
4558                            struct btrfs_chunk *chunk, int item_size)
4559 {
4560         struct btrfs_super_block *super_copy = fs_info->super_copy;
4561         struct btrfs_disk_key disk_key;
4562         u32 array_size;
4563         u8 *ptr;
4564
4565         mutex_lock(&fs_info->chunk_mutex);
4566         array_size = btrfs_super_sys_array_size(super_copy);
4567         if (array_size + item_size + sizeof(disk_key)
4568                         > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4569                 mutex_unlock(&fs_info->chunk_mutex);
4570                 return -EFBIG;
4571         }
4572
4573         ptr = super_copy->sys_chunk_array + array_size;
4574         btrfs_cpu_key_to_disk(&disk_key, key);
4575         memcpy(ptr, &disk_key, sizeof(disk_key));
4576         ptr += sizeof(disk_key);
4577         memcpy(ptr, chunk, item_size);
4578         item_size += sizeof(disk_key);
4579         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4580         mutex_unlock(&fs_info->chunk_mutex);
4581
4582         return 0;
4583 }
4584
4585 /*
4586  * Sort the devices in descending order by max_avail, then by total_avail.
4587  */
4588 static int btrfs_cmp_device_info(const void *a, const void *b)
4589 {
4590         const struct btrfs_device_info *di_a = a;
4591         const struct btrfs_device_info *di_b = b;
4592
4593         if (di_a->max_avail > di_b->max_avail)
4594                 return -1;
4595         if (di_a->max_avail < di_b->max_avail)
4596                 return 1;
4597         if (di_a->total_avail > di_b->total_avail)
4598                 return -1;
4599         if (di_a->total_avail < di_b->total_avail)
4600                 return 1;
4601         return 0;
4602 }
4603
4604 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4605 {
4606         if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4607                 return;
4608
4609         btrfs_set_fs_incompat(info, RAID56);
4610 }
4611
4612 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4613                                u64 start, u64 type)
4614 {
4615         struct btrfs_fs_info *info = trans->fs_info;
4616         struct btrfs_fs_devices *fs_devices = info->fs_devices;
4617         struct btrfs_device *device;
4618         struct map_lookup *map = NULL;
4619         struct extent_map_tree *em_tree;
4620         struct extent_map *em;
4621         struct btrfs_device_info *devices_info = NULL;
4622         u64 total_avail;
4623         int num_stripes;        /* total number of stripes to allocate */
4624         int data_stripes;       /* number of stripes that count for
4625                                    block group size */
4626         int sub_stripes;        /* sub_stripes info for map */
4627         int dev_stripes;        /* stripes per dev */
4628         int devs_max;           /* max devs to use */
4629         int devs_min;           /* min devs needed */
4630         int devs_increment;     /* ndevs has to be a multiple of this */
4631         int ncopies;            /* how many copies the data has */
4632         int ret;
4633         u64 max_stripe_size;
4634         u64 max_chunk_size;
4635         u64 stripe_size;
4636         u64 num_bytes;
4637         int ndevs;
4638         int i;
4639         int j;
4640         int index;
4641
4642         BUG_ON(!alloc_profile_is_valid(type, 0));
4643
4644         if (list_empty(&fs_devices->alloc_list)) {
4645                 if (btrfs_test_opt(info, ENOSPC_DEBUG))
4646                         btrfs_debug(info, "%s: no writable device", __func__);
4647                 return -ENOSPC;
4648         }
4649
4650         index = btrfs_bg_flags_to_raid_index(type);
4651
4652         sub_stripes = btrfs_raid_array[index].sub_stripes;
4653         dev_stripes = btrfs_raid_array[index].dev_stripes;
4654         devs_max = btrfs_raid_array[index].devs_max;
4655         devs_min = btrfs_raid_array[index].devs_min;
4656         devs_increment = btrfs_raid_array[index].devs_increment;
4657         ncopies = btrfs_raid_array[index].ncopies;
4658
4659         if (type & BTRFS_BLOCK_GROUP_DATA) {
4660                 max_stripe_size = SZ_1G;
4661                 max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4662                 if (!devs_max)
4663                         devs_max = BTRFS_MAX_DEVS(info);
4664         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4665                 /* for larger filesystems, use larger metadata chunks */
4666                 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4667                         max_stripe_size = SZ_1G;
4668                 else
4669                         max_stripe_size = SZ_256M;
4670                 max_chunk_size = max_stripe_size;
4671                 if (!devs_max)
4672                         devs_max = BTRFS_MAX_DEVS(info);
4673         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4674                 max_stripe_size = SZ_32M;
4675                 max_chunk_size = 2 * max_stripe_size;
4676                 if (!devs_max)
4677                         devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
4678         } else {
4679                 btrfs_err(info, "invalid chunk type 0x%llx requested",
4680                        type);
4681                 BUG_ON(1);
4682         }
4683
4684         /* we don't want a chunk larger than 10% of writeable space */
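             /*
              * (div_factor(x, 1) computes x * 1 / 10, i.e. 10% of the
              * total writable space)
              */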
4685         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4686                              max_chunk_size);
4687
4688         devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
4689                                GFP_NOFS);
4690         if (!devices_info)
4691                 return -ENOMEM;
4692
4693         /*
4694          * in the first pass through the devices list, we gather information
4695          * about the available holes on each device.
4696          */
4697         ndevs = 0;
4698         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
4699                 u64 max_avail;
4700                 u64 dev_offset;
4701
4702                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4703                         WARN(1, KERN_ERR
4704                                "BTRFS: read-only device in alloc_list\n");
4705                         continue;
4706                 }
4707
4708                 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
4709                                         &device->dev_state) ||
4710                     test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4711                         continue;
4712
4713                 if (device->total_bytes > device->bytes_used)
4714                         total_avail = device->total_bytes - device->bytes_used;
4715                 else
4716                         total_avail = 0;
4717
4718                 /* If there is no space on this device, skip it. */
4719                 if (total_avail == 0)
4720                         continue;
4721
4722                 ret = find_free_dev_extent(trans, device,
4723                                            max_stripe_size * dev_stripes,
4724                                            &dev_offset, &max_avail);
4725                 if (ret && ret != -ENOSPC)
4726                         goto error;
4727
4728                 if (ret == 0)
4729                         max_avail = max_stripe_size * dev_stripes;
4730
4731                 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
4732                         if (btrfs_test_opt(info, ENOSPC_DEBUG))
4733                                 btrfs_debug(info,
4734                         "%s: devid %llu has no free space, have=%llu want=%u",
4735                                             __func__, device->devid, max_avail,
4736                                             BTRFS_STRIPE_LEN * dev_stripes);
4737                         continue;
4738                 }
4739
4740                 if (ndevs == fs_devices->rw_devices) {
4741                         WARN(1, "%s: found more than %llu devices\n",
4742                              __func__, fs_devices->rw_devices);
4743                         break;
4744                 }
4745                 devices_info[ndevs].dev_offset = dev_offset;
4746                 devices_info[ndevs].max_avail = max_avail;
4747                 devices_info[ndevs].total_avail = total_avail;
4748                 devices_info[ndevs].dev = device;
4749                 ++ndevs;
4750         }
4751
4752         /*
4753          * now sort the devices by hole size / available space
4754          */
4755         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4756              btrfs_cmp_device_info, NULL);
4757
4758         /* round down to number of usable stripes */
4759         ndevs = round_down(ndevs, devs_increment);
4760
4761         if (ndevs < devs_min) {
4762                 ret = -ENOSPC;
4763                 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
4764                         btrfs_debug(info,
4765         "%s: not enough devices with free space: have=%d minimum required=%d",
4766                                     __func__, ndevs, devs_min);
4767                 }
4768                 goto error;
4769         }
4770
4771         ndevs = min(ndevs, devs_max);
4772
4773         /*
4774          * The primary goal is to maximize the number of stripes, so use as
4775          * many devices as possible, even if the stripes are not maximum sized.
4776          *
4777          * The DUP profile stores more than one stripe per device, the
4778          * max_avail is the total size so we have to adjust.
4779          */
4780         stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
4781         num_stripes = ndevs * dev_stripes;
4782
4783         /*
4784          * this will have to be fixed for RAID1 and RAID10 over
4785          * more drives
4786          */
4787         data_stripes = num_stripes / ncopies;
4788
4789         if (type & BTRFS_BLOCK_GROUP_RAID5)
4790                 data_stripes = num_stripes - 1;
4791
4792         if (type & BTRFS_BLOCK_GROUP_RAID6)
4793                 data_stripes = num_stripes - 2;
4794
4795         /*
4796          * Use the number of data stripes to figure out how big this chunk
4797          * is really going to be in terms of logical address space,
4798          * and compare that answer with the max chunk size. If it's higher,
4799          * we try to reduce stripe_size.
4800          */
4801         if (stripe_size * data_stripes > max_chunk_size) {
4802                 /*
4803                  * Reduce stripe_size, round it up to a 16MB boundary again and
4804                  * then use it, unless it ends up being even bigger than the
4805                  * previous value we had already.
4806                  */
4807                 stripe_size = min(round_up(div_u64(max_chunk_size,
4808                                                    data_stripes), SZ_16M),
4809                                   stripe_size);
4810         }
4811
4812         /* align to BTRFS_STRIPE_LEN */
4813         stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);
4814
4815         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4816         if (!map) {
4817                 ret = -ENOMEM;
4818                 goto error;
4819         }
4820         map->num_stripes = num_stripes;
4821
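             /*
              * Lay out the stripes device by device: stripe i * dev_stripes + j
              * is placed on device i at dev_offset + j * stripe_size, so DUP's
              * two stripes sit back to back on the same device.
              */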
4822         for (i = 0; i < ndevs; ++i) {
4823                 for (j = 0; j < dev_stripes; ++j) {
4824                         int s = i * dev_stripes + j;
4825                         map->stripes[s].dev = devices_info[i].dev;
4826                         map->stripes[s].physical = devices_info[i].dev_offset +
4827                                                    j * stripe_size;
4828                 }
4829         }
4830         map->stripe_len = BTRFS_STRIPE_LEN;
4831         map->io_align = BTRFS_STRIPE_LEN;
4832         map->io_width = BTRFS_STRIPE_LEN;
4833         map->type = type;
4834         map->sub_stripes = sub_stripes;
4835
4836         num_bytes = stripe_size * data_stripes;
4837
4838         trace_btrfs_chunk_alloc(info, map, start, num_bytes);
4839
4840         em = alloc_extent_map();
4841         if (!em) {
4842                 kfree(map);
4843                 ret = -ENOMEM;
4844                 goto error;
4845         }
4846         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
4847         em->map_lookup = map;
4848         em->start = start;
4849         em->len = num_bytes;
4850         em->block_start = 0;
4851         em->block_len = em->len;
4852         em->orig_block_len = stripe_size;
4853
4854         em_tree = &info->mapping_tree.map_tree;
4855         write_lock(&em_tree->lock);
4856         ret = add_extent_mapping(em_tree, em, 0);
4857         if (ret) {
4858                 write_unlock(&em_tree->lock);
4859                 free_extent_map(em);
4860                 goto error;
4861         }
4862
4863         list_add_tail(&em->list, &trans->transaction->pending_chunks);
4864         refcount_inc(&em->refs);
4865         write_unlock(&em_tree->lock);
4866
4867         ret = btrfs_make_block_group(trans, 0, type, start, num_bytes);
4868         if (ret)
4869                 goto error_del_extent;
4870
4871         for (i = 0; i < map->num_stripes; i++) {
4872                 num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
4873                 btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
4874                 map->stripes[i].dev->has_pending_chunks = true;
4875         }
4876
4877         atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
4878
4879         free_extent_map(em);
4880         check_raid56_incompat_flag(info, type);
4881
4882         kfree(devices_info);
4883         return 0;
4884
4885 error_del_extent:
4886         write_lock(&em_tree->lock);
4887         remove_extent_mapping(em_tree, em);
4888         write_unlock(&em_tree->lock);
4889
4890         /* One for our allocation */
4891         free_extent_map(em);
4892         /* One for the tree reference */
4893         free_extent_map(em);
4894         /* One for the pending_chunks list reference */
4895         free_extent_map(em);
4896 error:
4897         kfree(devices_info);
4898         return ret;
4899 }
4900
4901 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4902                              u64 chunk_offset, u64 chunk_size)
4903 {
4904         struct btrfs_fs_info *fs_info = trans->fs_info;
4905         struct btrfs_root *extent_root = fs_info->extent_root;
4906         struct btrfs_root *chunk_root = fs_info->chunk_root;
4907         struct btrfs_key key;
4908         struct btrfs_device *device;
4909         struct btrfs_chunk *chunk;
4910         struct btrfs_stripe *stripe;
4911         struct extent_map *em;
4912         struct map_lookup *map;
4913         size_t item_size;
4914         u64 dev_offset;
4915         u64 stripe_size;
4916         int i = 0;
4917         int ret = 0;
4918
4919         em = get_chunk_map(fs_info, chunk_offset, chunk_size);
4920         if (IS_ERR(em))
4921                 return PTR_ERR(em);
4922
4923         map = em->map_lookup;
4924         item_size = btrfs_chunk_item_size(map->num_stripes);
4925         stripe_size = em->orig_block_len;
4926
4927         chunk = kzalloc(item_size, GFP_NOFS);
4928         if (!chunk) {
4929                 ret = -ENOMEM;
4930                 goto out;
4931         }
4932
4933         /*
4934          * Take the device list mutex to prevent races with the final phase of
4935          * a device replace operation that replaces the device object associated
4936          * with the map's stripes, because the device object's id can change
4937          * at any time during that final phase of the device replace operation
4938          * (dev-replace.c:btrfs_dev_replace_finishing()).
4939          */
4940         mutex_lock(&fs_info->fs_devices->device_list_mutex);
4941         for (i = 0; i < map->num_stripes; i++) {
4942                 device = map->stripes[i].dev;
4943                 dev_offset = map->stripes[i].physical;
4944
4945                 ret = btrfs_update_device(trans, device);
4946                 if (ret)
4947                         break;
4948                 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
4949                                              dev_offset, stripe_size);
4950                 if (ret)
4951                         break;
4952         }
4953         if (ret) {
4954                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4955                 goto out;
4956         }
4957
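             /*
              * Fill in the on-disk chunk item: one btrfs_stripe per map
              * stripe, recording the devid, the physical offset and the
              * device UUID.
              */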
4958         stripe = &chunk->stripe;
4959         for (i = 0; i < map->num_stripes; i++) {
4960                 device = map->stripes[i].dev;
4961                 dev_offset = map->stripes[i].physical;
4962
4963                 btrfs_set_stack_stripe_devid(stripe, device->devid);
4964                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4965                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4966                 stripe++;
4967         }
4968         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4969
4970         btrfs_set_stack_chunk_length(chunk, chunk_size);
4971         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4972         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4973         btrfs_set_stack_chunk_type(chunk, map->type);
4974         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4975         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4976         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4977         btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
4978         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4979
4980         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4981         key.type = BTRFS_CHUNK_ITEM_KEY;
4982         key.offset = chunk_offset;
4983
4984         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4985         if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4986                 /*
4987                  * TODO: Cleanup of inserted chunk root in case of
4988                  * failure.
4989                  */
4990                 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
4991         }
4992
4993 out:
4994         kfree(chunk);
4995         free_extent_map(em);
4996         return ret;
4997 }
4998
4999 /*
5000  * Chunk allocation falls into two parts. The first part does the work
5001  * that makes the newly allocated chunk usable, but does not do any
5002  * operation that modifies the chunk tree. The second part does the work
5003  * that requires modifying the chunk tree. This division is important for the
5004  * bootstrap process of adding storage to a seed btrfs.
5005  */
5006 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5007 {
5008         u64 chunk_offset;
5009
5010         lockdep_assert_held(&trans->fs_info->chunk_mutex);
5011         chunk_offset = find_next_chunk(trans->fs_info);
5012         return __btrfs_alloc_chunk(trans, chunk_offset, type);
5013 }
5014
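     /*
      * Allocate the first metadata chunk and then the first system chunk for
      * a newly writable set of devices, e.g. when a seed filesystem gains its
      * first writable device.
      */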
5015 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
5016                                          struct btrfs_fs_info *fs_info)
5017 {
5018         u64 chunk_offset;
5019         u64 sys_chunk_offset;
5020         u64 alloc_profile;
5021         int ret;
5022
5023         chunk_offset = find_next_chunk(fs_info);
5024         alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5025         ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
5026         if (ret)
5027                 return ret;
5028
5029         sys_chunk_offset = find_next_chunk(fs_info);
5030         alloc_profile = btrfs_system_alloc_profile(fs_info);
5031         ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
5032         return ret;
5033 }
5034
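     /*
      * Return how many failed (or missing) devices the chunk's RAID profile
      * can tolerate before data in it becomes unreadable.
      */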
5035 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5036 {
5037         int max_errors;
5038
5039         if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5040                          BTRFS_BLOCK_GROUP_RAID10 |
5041                          BTRFS_BLOCK_GROUP_RAID5)) {
5042                 max_errors = 1;
5043         } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5044                 max_errors = 2;
5045         } else {
5046                 max_errors = 0;
5047         }
5048
5049         return max_errors;
5050 }
5051
5052 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5053 {
5054         struct extent_map *em;
5055         struct map_lookup *map;
5056         int readonly = 0;
5057         int miss_ndevs = 0;
5058         int i;
5059
5060         em = get_chunk_map(fs_info, chunk_offset, 1);
5061         if (IS_ERR(em))
5062                 return 1;
5063
5064         map = em->map_lookup;
5065         for (i = 0; i < map->num_stripes; i++) {
5066                 if (test_bit(BTRFS_DEV_STATE_MISSING,
5067                                         &map->stripes[i].dev->dev_state)) {
5068                         miss_ndevs++;
5069                         continue;
5070                 }
5071                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5072                                         &map->stripes[i].dev->dev_state)) {
5073                         readonly = 1;
5074                         goto end;
5075                 }
5076         }
5077
5078         /*
5079          * If the number of missing devices is larger than max errors,
5080          * we cannot write the data into that chunk successfully, so
5081          * set it readonly.
5082          */
5083         if (miss_ndevs > btrfs_chunk_max_errors(map))
5084                 readonly = 1;
5085 end:
5086         free_extent_map(em);
5087         return readonly;
5088 }
5089
5090 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
5091 {
5092         extent_map_tree_init(&tree->map_tree);
5093 }
5094
5095 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
5096 {
5097         struct extent_map *em;
5098
5099         while (1) {
5100                 write_lock(&tree->map_tree.lock);
5101                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
5102                 if (em)
5103                         remove_extent_mapping(&tree->map_tree, em);
5104                 write_unlock(&tree->map_tree.lock);
5105                 if (!em)
5106                         break;
5107                 /* once for us */
5108                 free_extent_map(em);
5109                 /* once for the tree */
5110                 free_extent_map(em);
5111         }
5112 }
5113
5114 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5115 {
5116         struct extent_map *em;
5117         struct map_lookup *map;
5118         int ret;
5119
5120         em = get_chunk_map(fs_info, logical, len);
5121         if (IS_ERR(em))
5122                 /*
5123                  * We could return errors for these cases, but that could get
5124                  * ugly and we'd probably end up doing the same thing anyway: do
5125                  * nothing else and exit. So return 1 so the callers don't try
5126                  * to use other copies.
5127                  */
5128                 return 1;
5129
5130         map = em->map_lookup;
5131         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
5132                 ret = map->num_stripes;
5133         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5134                 ret = map->sub_stripes;
5135         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5136                 ret = 2;
5137         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5138                 /*
5139                  * There could be two corrupted data stripes, so we need
5140                  * to retry in a loop in order to rebuild the correct data.
5141                  *
5142                  * Fail a stripe at a time on every retry except the
5143                  * stripe under reconstruction.
5144                  */
5145                 ret = map->num_stripes;
5146         else
5147                 ret = 1;
5148         free_extent_map(em);
5149
5150         btrfs_dev_replace_read_lock(&fs_info->dev_replace);
5151         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5152             fs_info->dev_replace.tgtdev)
5153                 ret++;
5154         btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
5155
5156         return ret;
5157 }
5158
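     /*
      * Return the full stripe length of the chunk containing @logical: for
      * RAID5/6 that is stripe_len times the number of data stripes, otherwise
      * a single sector.
      */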
5159 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5160                                     u64 logical)
5161 {
5162         struct extent_map *em;
5163         struct map_lookup *map;
5164         unsigned long len = fs_info->sectorsize;
5165
5166         em = get_chunk_map(fs_info, logical, len);
5167
5168         if (!WARN_ON(IS_ERR(em))) {
5169                 map = em->map_lookup;
5170                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5171                         len = map->stripe_len * nr_data_stripes(map);
5172                 free_extent_map(em);
5173         }
5174         return len;
5175 }
5176
5177 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5178 {
5179         struct extent_map *em;
5180         struct map_lookup *map;
5181         int ret = 0;
5182
5183         em = get_chunk_map(fs_info, logical, len);
5184
5185         if (!WARN_ON(IS_ERR(em))) {
5186                 map = em->map_lookup;
5187                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5188                         ret = 1;
5189                 free_extent_map(em);
5190         }
5191         return ret;
5192 }
5193
5194 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5195                             struct map_lookup *map, int first,
5196                             int dev_replace_is_ongoing)
5197 {
5198         int i;
5199         int num_stripes;
5200         int preferred_mirror;
5201         int tolerance;
5202         struct btrfs_device *srcdev;
5203
5204         ASSERT((map->type &
5205                  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)));
5206
5207         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5208                 num_stripes = map->sub_stripes;
5209         else
5210                 num_stripes = map->num_stripes;
5211
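             /*
              * Spread reads across the mirrors by deriving the preferred one
              * from the calling task's pid.
              */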
5212         preferred_mirror = first + current->pid % num_stripes;
5213
5214         if (dev_replace_is_ongoing &&
5215             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5216              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5217                 srcdev = fs_info->dev_replace.srcdev;
5218         else
5219                 srcdev = NULL;
5220
5221         /*
5222          * Try to avoid the drive that is the source drive for a
5223          * dev-replace procedure; only choose it if no other non-missing
5224          * mirror is available.
5225          */
5226         for (tolerance = 0; tolerance < 2; tolerance++) {
5227                 if (map->stripes[preferred_mirror].dev->bdev &&
5228                     (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5229                         return preferred_mirror;
5230                 for (i = first; i < first + num_stripes; i++) {
5231                         if (map->stripes[i].dev->bdev &&
5232                             (tolerance || map->stripes[i].dev != srcdev))
5233                                 return i;
5234                 }
5235         }
5236
5237         /* We couldn't find one that doesn't fail.  Just return something
5238          * and the I/O error handling code will clean up eventually.
5239          */
5240         return preferred_mirror;
5241 }
5242
5243 static inline int parity_smaller(u64 a, u64 b)
5244 {
5245         return a > b;
5246 }
5247
5248 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5249 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5250 {
5251         struct btrfs_bio_stripe s;
5252         int i;
5253         u64 l;
5254         int again = 1;
5255
5256         while (again) {
5257                 again = 0;
5258                 for (i = 0; i < num_stripes - 1; i++) {
5259                         if (parity_smaller(bbio->raid_map[i],
5260                                            bbio->raid_map[i+1])) {
5261                                 s = bbio->stripes[i];
5262                                 l = bbio->raid_map[i];
5263                                 bbio->stripes[i] = bbio->stripes[i+1];
5264                                 bbio->raid_map[i] = bbio->raid_map[i+1];
5265                                 bbio->stripes[i+1] = s;
5266                                 bbio->raid_map[i+1] = l;
5267
5268                                 again = 1;
5269                         }
5270                 }
5271         }
5272 }
5273
5274 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5275 {
5276         struct btrfs_bio *bbio = kzalloc(
5277                  /* the size of the btrfs_bio */
5278                 sizeof(struct btrfs_bio) +
5279                 /* plus the variable array for the stripes */
5280                 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5281                 /* plus the variable array for the tgt dev */
5282                 sizeof(int) * (real_stripes) +
5283                 /*
5284                  * plus the raid_map, which includes both the tgt dev
5285                  * and the stripes
5286                  */
5287                 sizeof(u64) * (total_stripes),
5288                 GFP_NOFS|__GFP_NOFAIL);
5289
5290         atomic_set(&bbio->error, 0);
5291         refcount_set(&bbio->refs, 1);
5292
5293         return bbio;
5294 }
5295
5296 void btrfs_get_bbio(struct btrfs_bio *bbio)
5297 {
5298         WARN_ON(!refcount_read(&bbio->refs));
5299         refcount_inc(&bbio->refs);
5300 }
5301
5302 void btrfs_put_bbio(struct btrfs_bio *bbio)
5303 {
5304         if (!bbio)
5305                 return;
5306         if (refcount_dec_and_test(&bbio->refs))
5307                 kfree(bbio);
5308 }
5309
5310 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5311 /*
5312  * Please note that discard won't be sent to the target device of a
5313  * device replace operation.
5314  */
5315 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5316                                          u64 logical, u64 length,
5317                                          struct btrfs_bio **bbio_ret)
5318 {
5319         struct extent_map *em;
5320         struct map_lookup *map;
5321         struct btrfs_bio *bbio;
5322         u64 offset;
5323         u64 stripe_nr;
5324         u64 stripe_nr_end;
5325         u64 stripe_end_offset;
5326         u64 stripe_cnt;
5327         u64 stripe_len;
5328         u64 stripe_offset;
5329         u64 num_stripes;
5330         u32 stripe_index;
5331         u32 factor = 0;
5332         u32 sub_stripes = 0;
5333         u64 stripes_per_dev = 0;
5334         u32 remaining_stripes = 0;
5335         u32 last_stripe = 0;
5336         int ret = 0;
5337         int i;
5338
5339         /* discard always returns a bbio */
5340         ASSERT(bbio_ret);
5341
5342         em = get_chunk_map(fs_info, logical, length);
5343         if (IS_ERR(em))
5344                 return PTR_ERR(em);
5345
5346         map = em->map_lookup;
5347         /* we don't discard raid56 yet */
5348         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5349                 ret = -EOPNOTSUPP;
5350                 goto out;
5351         }
5352
5353         offset = logical - em->start;
5354         length = min_t(u64, em->len - offset, length);
5355
5356         stripe_len = map->stripe_len;
5357         /*
5358          * stripe_nr counts the total number of stripes we have to stride
5359          * to get to this block
5360          */
5361         stripe_nr = div64_u64(offset, stripe_len);
5362
5363         /* stripe_offset is the offset of this block in its stripe */
5364         stripe_offset = offset - stripe_nr * stripe_len;
5365
5366         stripe_nr_end = round_up(offset + length, map->stripe_len);
5367         stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5368         stripe_cnt = stripe_nr_end - stripe_nr;
5369         stripe_end_offset = stripe_nr_end * map->stripe_len -
5370                             (offset + length);
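             /*
              * stripe_end_offset is the unused tail of the last stripe, i.e.
              * how far the end of the discard range falls short of the next
              * stripe boundary.
              */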
5371         /*
5372          * after this, stripe_nr is the number of stripes on this
5373          * device we have to walk to find the data, and stripe_index is
5374          * the number of our device in the stripe array
5375          */
5376         num_stripes = 1;
5377         stripe_index = 0;
5378         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5379                          BTRFS_BLOCK_GROUP_RAID10)) {
5380                 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5381                         sub_stripes = 1;
5382                 else
5383                         sub_stripes = map->sub_stripes;
5384
5385                 factor = map->num_stripes / sub_stripes;
5386                 num_stripes = min_t(u64, map->num_stripes,
5387                                     sub_stripes * stripe_cnt);
5388                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5389                 stripe_index *= sub_stripes;
5390                 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5391                                               &remaining_stripes);
5392                 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5393                 last_stripe *= sub_stripes;
5394         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5395                                 BTRFS_BLOCK_GROUP_DUP)) {
5396                 num_stripes = map->num_stripes;
5397         } else {
5398                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5399                                         &stripe_index);
5400         }
5401
5402         bbio = alloc_btrfs_bio(num_stripes, 0);
5403         if (!bbio) {
5404                 ret = -ENOMEM;
5405                 goto out;
5406         }
5407
5408         for (i = 0; i < num_stripes; i++) {
5409                 bbio->stripes[i].physical =
5410                         map->stripes[stripe_index].physical +
5411                         stripe_offset + stripe_nr * map->stripe_len;
5412                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5413
5414                 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5415                                  BTRFS_BLOCK_GROUP_RAID10)) {
5416                         bbio->stripes[i].length = stripes_per_dev *
5417                                 map->stripe_len;
5418
5419                         if (i / sub_stripes < remaining_stripes)
5420                                 bbio->stripes[i].length +=
5421                                         map->stripe_len;
5422
5423                         /*
5424                          * Special for the first stripe and
5425                          * the last stripe:
5426                          *
5427                          * |-------|...|-------|
5428                          *     |----------|
5429                          *    off     end_off
5430                          */
5431                         if (i < sub_stripes)
5432                                 bbio->stripes[i].length -=
5433                                         stripe_offset;
5434
5435                         if (stripe_index >= last_stripe &&
5436                             stripe_index <= (last_stripe +
5437                                              sub_stripes - 1))
5438                                 bbio->stripes[i].length -=
5439                                         stripe_end_offset;
5440
5441                         if (i == sub_stripes - 1)
5442                                 stripe_offset = 0;
5443                 } else {
5444                         bbio->stripes[i].length = length;
5445                 }
5446
5447                 stripe_index++;
5448                 if (stripe_index == map->num_stripes) {
5449                         stripe_index = 0;
5450                         stripe_nr++;
5451                 }
5452         }
5453
5454         *bbio_ret = bbio;
5455         bbio->map_type = map->type;
5456         bbio->num_stripes = num_stripes;
5457 out:
5458         free_extent_map(em);
5459         return ret;
5460 }
5461
5462 /*
5463  * In the dev-replace case, for the repair case (that's the only case where the mirror
5464  * is selected explicitly when calling btrfs_map_block), blocks left of the
5465  * left cursor can also be read from the target drive.
5466  *
5467  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5468  * array of stripes.
5469  * For READ, it also needs to be supported using the same mirror number.
5470  *
5471  * If the requested block is not left of the left cursor, EIO is returned. This
5472  * can happen because btrfs_num_copies() returns one more in the dev-replace
5473  * case.
5474  */
5475 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5476                                          u64 logical, u64 length,
5477                                          u64 srcdev_devid, int *mirror_num,
5478                                          u64 *physical)
5479 {
5480         struct btrfs_bio *bbio = NULL;
5481         int num_stripes;
5482         int index_srcdev = 0;
5483         int found = 0;
5484         u64 physical_of_found = 0;
5485         int i;
5486         int ret = 0;
5487
5488         ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5489                                 logical, &length, &bbio, 0, 0);
5490         if (ret) {
5491                 ASSERT(bbio == NULL);
5492                 return ret;
5493         }
5494
5495         num_stripes = bbio->num_stripes;
5496         if (*mirror_num > num_stripes) {
5497                 /*
5498                  * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5499                  * that means that the requested area is not left of the left
5500                  * cursor
5501                  */
5502                 btrfs_put_bbio(bbio);
5503                 return -EIO;
5504         }
5505
5506         /*
5507          * process the rest of the function using the mirror_num of the source
5508          * Process the rest of the function using the mirror_num of the source
5509          * drive. Therefore look it up first.  At the end, patch the device
5510          * pointer to that of the target drive.
5511         for (i = 0; i < num_stripes; i++) {
5512                 if (bbio->stripes[i].dev->devid != srcdev_devid)
5513                         continue;
5514
5515                 /*
5516                  * In case of DUP, in order to keep it simple, only add the
5517                  * mirror with the lowest physical address
5518                  */
5519                 if (found &&
5520                     physical_of_found <= bbio->stripes[i].physical)
5521                         continue;
5522
5523                 index_srcdev = i;
5524                 found = 1;
5525                 physical_of_found = bbio->stripes[i].physical;
5526         }
5527
5528         btrfs_put_bbio(bbio);
5529
5530         ASSERT(found);
5531         if (!found)
5532                 return -EIO;
5533
5534         *mirror_num = index_srcdev + 1;
5535         *physical = physical_of_found;
5536         return ret;
5537 }
5538
5539 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5540                                       struct btrfs_bio **bbio_ret,
5541                                       struct btrfs_dev_replace *dev_replace,
5542                                       int *num_stripes_ret, int *max_errors_ret)
5543 {
5544         struct btrfs_bio *bbio = *bbio_ret;
5545         u64 srcdev_devid = dev_replace->srcdev->devid;
5546         int tgtdev_indexes = 0;
5547         int num_stripes = *num_stripes_ret;
5548         int max_errors = *max_errors_ret;
5549         int i;
5550
5551         if (op == BTRFS_MAP_WRITE) {
5552                 int index_where_to_add;
5553
5554                 /*
5555                  * duplicate the write operations while the dev replace
5556                  * procedure is running. Since the copying of the old disk to
5557                  * the new disk takes place at run time while the filesystem is
5558                  * mounted writable, the regular write operations to the old
5559                  * disk have to be duplicated to go to the new disk as well.
5560                  *
5561                  * Note that device->missing is handled by the caller, and that
5562                  * the write to the old disk is already set up in the stripes
5563                  * array.
5564                  */
5565                 index_where_to_add = num_stripes;
5566                 for (i = 0; i < num_stripes; i++) {
5567                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5568                                 /* write to new disk, too */
5569                                 struct btrfs_bio_stripe *new =
5570                                         bbio->stripes + index_where_to_add;
5571                                 struct btrfs_bio_stripe *old =
5572                                         bbio->stripes + i;
5573
5574                                 new->physical = old->physical;
5575                                 new->length = old->length;
5576                                 new->dev = dev_replace->tgtdev;
5577                                 bbio->tgtdev_map[i] = index_where_to_add;
5578                                 index_where_to_add++;
5579                                 max_errors++;
5580                                 tgtdev_indexes++;
5581                         }
5582                 }
5583                 num_stripes = index_where_to_add;
5584         } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5585                 int index_srcdev = 0;
5586                 int found = 0;
5587                 u64 physical_of_found = 0;
5588
5589                 /*
5590                  * During the dev-replace procedure, the target drive can also
5591                  * be used to read data in case it is needed to repair a corrupt
5592                  * block elsewhere. This is possible if the requested area is
5593                  * left of the left cursor. In this area, the target drive is a
5594                  * full copy of the source drive.
5595                  */
5596                 for (i = 0; i < num_stripes; i++) {
5597                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5598                                 /*
5599                                  * In case of DUP, in order to keep it simple,
5600                                  * only add the mirror with the lowest physical
5601                                  * address
5602                                  */
5603                                 if (found &&
5604                                     physical_of_found <=
5605                                      bbio->stripes[i].physical)
5606                                         continue;
5607                                 index_srcdev = i;
5608                                 found = 1;
5609                                 physical_of_found = bbio->stripes[i].physical;
5610                         }
5611                 }
5612                 if (found) {
5613                         struct btrfs_bio_stripe *tgtdev_stripe =
5614                                 bbio->stripes + num_stripes;
5615
5616                         tgtdev_stripe->physical = physical_of_found;
5617                         tgtdev_stripe->length =
5618                                 bbio->stripes[index_srcdev].length;
5619                         tgtdev_stripe->dev = dev_replace->tgtdev;
5620                         bbio->tgtdev_map[index_srcdev] = num_stripes;
5621
5622                         tgtdev_indexes++;
5623                         num_stripes++;
5624                 }
5625         }
5626
5627         *num_stripes_ret = num_stripes;
5628         *max_errors_ret = max_errors;
5629         bbio->num_tgtdevs = tgtdev_indexes;
5630         *bbio_ret = bbio;
5631 }
5632
5633 static bool need_full_stripe(enum btrfs_map_op op)
5634 {
5635         return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5636 }
5637
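/*
 * Map a logical byte range of the filesystem to the physical stripes that
 * store it.
 *
 * @op determines how many copies are needed: plain reads want a single
 * stripe, while writes and BTRFS_MAP_GET_READ_MIRRORS need the full set.
 * Discards are handled separately by __btrfs_map_block_for_discard().  On
 * success *length is trimmed so the range does not cross a stripe boundary
 * (or, for RAID5/6 writes, a full stripe), and *bbio_ret describes the
 * devices and physical offsets to submit to; the caller drops that
 * reference with btrfs_put_bbio().
 */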
5638 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
5639                              enum btrfs_map_op op,
5640                              u64 logical, u64 *length,
5641                              struct btrfs_bio **bbio_ret,
5642                              int mirror_num, int need_raid_map)
5643 {
5644         struct extent_map *em;
5645         struct map_lookup *map;
5646         u64 offset;
5647         u64 stripe_offset;
5648         u64 stripe_nr;
5649         u64 stripe_len;
5650         u32 stripe_index;
5651         int i;
5652         int ret = 0;
5653         int num_stripes;
5654         int max_errors = 0;
5655         int tgtdev_indexes = 0;
5656         struct btrfs_bio *bbio = NULL;
5657         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
5658         int dev_replace_is_ongoing = 0;
5659         int num_alloc_stripes;
5660         int patch_the_first_stripe_for_dev_replace = 0;
5661         u64 physical_to_patch_in_first_stripe = 0;
5662         u64 raid56_full_stripe_start = (u64)-1;
5663
5664         if (op == BTRFS_MAP_DISCARD)
5665                 return __btrfs_map_block_for_discard(fs_info, logical,
5666                                                      *length, bbio_ret);
5667
5668         em = get_chunk_map(fs_info, logical, *length);
5669         if (IS_ERR(em))
5670                 return PTR_ERR(em);
5671
5672         map = em->map_lookup;
5673         offset = logical - em->start;
5674
5675         stripe_len = map->stripe_len;
5676         stripe_nr = offset;
5677         /*
5678          * stripe_nr counts the total number of stripes we have to stride
5679          * to get to this block
5680          */
5681         stripe_nr = div64_u64(stripe_nr, stripe_len);
5682
5683         stripe_offset = stripe_nr * stripe_len;
5684         if (offset < stripe_offset) {
5685                 btrfs_crit(fs_info,
5686                            "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
5687                            stripe_offset, offset, em->start, logical,
5688                            stripe_len);
5689                 free_extent_map(em);
5690                 return -EINVAL;
5691         }
5692
5693         /* stripe_offset is the offset of this block in its stripe */
5694         stripe_offset = offset - stripe_offset;
5695
5696         /* if we're here for raid56, we need to know the stripe aligned start */
5697         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5698                 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
5699                 raid56_full_stripe_start = offset;
5700
5701                 /* allow a write of a full stripe, but make sure we don't
5702                  * allow straddling of stripes
5703                  */
5704                 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5705                                 full_stripe_len);
5706                 raid56_full_stripe_start *= full_stripe_len;
5707         }
5708
5709         if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5710                 u64 max_len;
5711                 /* For writes to RAID[56], allow a full stripeset across all disks.
5712                    For other RAID types and for RAID[56] reads, just allow a single
5713                    stripe (on a single disk). */
5714                 if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
5715                     (op == BTRFS_MAP_WRITE)) {
5716                         max_len = stripe_len * nr_data_stripes(map) -
5717                                 (offset - raid56_full_stripe_start);
5718                 } else {
5719                         /* we limit the length of each bio to what fits in a stripe */
5720                         max_len = stripe_len - stripe_offset;
5721                 }
5722                 *length = min_t(u64, em->len - offset, max_len);
5723         } else {
5724                 *length = em->len - offset;
5725         }
5726
5727         /* This is for when we're called from btrfs_merge_bio_hook() and all
5728            it cares about is the length */
5729         if (!bbio_ret)
5730                 goto out;
5731
5732         btrfs_dev_replace_read_lock(dev_replace);
5733         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
5734         if (!dev_replace_is_ongoing)
5735                 btrfs_dev_replace_read_unlock(dev_replace);
5736         else
5737                 btrfs_dev_replace_set_lock_blocking(dev_replace);
5738
5739         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
5740             !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
5741                 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
5742                                                     dev_replace->srcdev->devid,
5743                                                     &mirror_num,
5744                                             &physical_to_patch_in_first_stripe);
5745                 if (ret)
5746                         goto out;
5747                 else
5748                         patch_the_first_stripe_for_dev_replace = 1;
5749         } else if (mirror_num > map->num_stripes) {
5750                 mirror_num = 0;
5751         }
5752
5753         num_stripes = 1;
5754         stripe_index = 0;
5755         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5756                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5757                                 &stripe_index);
5758                 if (!need_full_stripe(op))
5759                         mirror_num = 1;
5760         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
5761                 if (need_full_stripe(op))
5762                         num_stripes = map->num_stripes;
5763                 else if (mirror_num)
5764                         stripe_index = mirror_num - 1;
5765                 else {
5766                         stripe_index = find_live_mirror(fs_info, map, 0,
5767                                             dev_replace_is_ongoing);
5768                         mirror_num = stripe_index + 1;
5769                 }
5770
5771         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
5772                 if (need_full_stripe(op)) {
5773                         num_stripes = map->num_stripes;
5774                 } else if (mirror_num) {
5775                         stripe_index = mirror_num - 1;
5776                 } else {
5777                         mirror_num = 1;
5778                 }
5779
5780         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5781                 u32 factor = map->num_stripes / map->sub_stripes;
5782
5783                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5784                 stripe_index *= map->sub_stripes;
5785
5786                 if (need_full_stripe(op))
5787                         num_stripes = map->sub_stripes;
5788                 else if (mirror_num)
5789                         stripe_index += mirror_num - 1;
5790                 else {
5791                         int old_stripe_index = stripe_index;
5792                         stripe_index = find_live_mirror(fs_info, map,
5793                                               stripe_index,
5794                                               dev_replace_is_ongoing);
5795                         mirror_num = stripe_index - old_stripe_index + 1;
5796                 }
5797
5798         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5799                 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
5800                         /* push stripe_nr back to the start of the full stripe */
5801                         stripe_nr = div64_u64(raid56_full_stripe_start,
5802                                         stripe_len * nr_data_stripes(map));
5803
5804                         /* RAID[56] write or recovery. Return all stripes */
5805                         num_stripes = map->num_stripes;
5806                         max_errors = nr_parity_stripes(map);
5807
5808                         *length = map->stripe_len;
5809                         stripe_index = 0;
5810                         stripe_offset = 0;
5811                 } else {
5812                         /*
5813                          * Mirror #0 or #1 means the original data block.
5814                          * Mirror #2 is RAID5 parity block.
5815                          * Mirror #3 is RAID6 Q block.
5816                          */
5817                         stripe_nr = div_u64_rem(stripe_nr,
5818                                         nr_data_stripes(map), &stripe_index);
5819                         if (mirror_num > 1)
5820                                 stripe_index = nr_data_stripes(map) +
5821                                                 mirror_num - 2;
5822
5823                         /* We distribute the parity blocks across stripes */
5824                         div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
5825                                         &stripe_index);
5826                         if (!need_full_stripe(op) && mirror_num <= 1)
5827                                 mirror_num = 1;
5828                 }
5829         } else {
5830                 /*
5831                  * after this, stripe_nr is the number of stripes on this
5832                  * device we have to walk to find the data, and stripe_index is
5833                  * the number of our device in the stripe array
5834                  */
5835                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5836                                 &stripe_index);
5837                 mirror_num = stripe_index + 1;
5838         }
5839         if (stripe_index >= map->num_stripes) {
5840                 btrfs_crit(fs_info,
5841                            "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
5842                            stripe_index, map->num_stripes);
5843                 ret = -EINVAL;
5844                 goto out;
5845         }
5846
5847         num_alloc_stripes = num_stripes;
5848         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
5849                 if (op == BTRFS_MAP_WRITE)
5850                         num_alloc_stripes <<= 1;
5851                 if (op == BTRFS_MAP_GET_READ_MIRRORS)
5852                         num_alloc_stripes++;
5853                 tgtdev_indexes = num_stripes;
5854         }
5855
5856         bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
5857         if (!bbio) {
5858                 ret = -ENOMEM;
5859                 goto out;
5860         }
5861         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
5862                 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5863
5864         /* build raid_map */
5865         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
5866             (need_full_stripe(op) || mirror_num > 1)) {
5867                 u64 tmp;
5868                 unsigned rot;
5869
5870                 bbio->raid_map = (u64 *)((void *)bbio->stripes +
5871                                  sizeof(struct btrfs_bio_stripe) *
5872                                  num_alloc_stripes +
5873                                  sizeof(int) * tgtdev_indexes);
5874
5875                 /* Work out the disk rotation on this stripe-set */
5876                 div_u64_rem(stripe_nr, num_stripes, &rot);
5877
5878                 /* Fill in the logical address of each stripe */
5879                 tmp = stripe_nr * nr_data_stripes(map);
5880                 for (i = 0; i < nr_data_stripes(map); i++)
5881                         bbio->raid_map[(i+rot) % num_stripes] =
5882                                 em->start + (tmp + i) * map->stripe_len;
5883
5884                 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
5885                 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5886                         bbio->raid_map[(i+rot+1) % num_stripes] =
5887                                 RAID6_Q_STRIPE;
5888         }
5889
5890
5891         for (i = 0; i < num_stripes; i++) {
5892                 bbio->stripes[i].physical =
5893                         map->stripes[stripe_index].physical +
5894                         stripe_offset +
5895                         stripe_nr * map->stripe_len;
5896                 bbio->stripes[i].dev =
5897                         map->stripes[stripe_index].dev;
5898                 stripe_index++;
5899         }
5900
5901         if (need_full_stripe(op))
5902                 max_errors = btrfs_chunk_max_errors(map);
5903
5904         if (bbio->raid_map)
5905                 sort_parity_stripes(bbio, num_stripes);
5906
5907         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
5908             need_full_stripe(op)) {
5909                 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
5910                                           &max_errors);
5911         }
5912
5913         *bbio_ret = bbio;
5914         bbio->map_type = map->type;
5915         bbio->num_stripes = num_stripes;
5916         bbio->max_errors = max_errors;
5917         bbio->mirror_num = mirror_num;
5918
5919         /*
5920          * This is the case of a read with dev_replace_is_ongoing and
5921          * mirror_num == num_stripes + 1, where the dev_replace target
5922          * drive is available as an extra mirror.
5923          */
5924         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5925                 WARN_ON(num_stripes > 1);
5926                 bbio->stripes[0].dev = dev_replace->tgtdev;
5927                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5928                 bbio->mirror_num = map->num_stripes + 1;
5929         }
5930 out:
5931         if (dev_replace_is_ongoing) {
5932                 btrfs_dev_replace_clear_lock_blocking(dev_replace);
5933                 btrfs_dev_replace_read_unlock(dev_replace);
5934         }
5935         free_extent_map(em);
5936         return ret;
5937 }
5938
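/*
 * Wrapper around __btrfs_map_block() that never builds a RAID5/6 raid_map,
 * for callers that only need the plain stripe mapping.
 */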
5939 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5940                       u64 logical, u64 *length,
5941                       struct btrfs_bio **bbio_ret, int mirror_num)
5942 {
5943         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
5944                                  mirror_num, 0);
5945 }
5946
5947 /* For Scrub/replace */
5948 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5949                      u64 logical, u64 *length,
5950                      struct btrfs_bio **bbio_ret)
5951 {
5952         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
5953 }
5954
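/*
 * Reverse mapping: given a chunk and a physical address on one of its
 * stripes, compute the logical address(es) that map to it.  The result
 * array is allocated here, returned through @logical and must be freed by
 * the caller.
 */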
5955 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
5956                      u64 physical, u64 **logical, int *naddrs, int *stripe_len)
5957 {
5958         struct extent_map *em;
5959         struct map_lookup *map;
5960         u64 *buf;
5961         u64 bytenr;
5962         u64 length;
5963         u64 stripe_nr;
5964         u64 rmap_len;
5965         int i, j, nr = 0;
5966
5967         em = get_chunk_map(fs_info, chunk_start, 1);
5968         if (IS_ERR(em))
5969                 return -EIO;
5970
5971         map = em->map_lookup;
5972         length = em->len;
5973         rmap_len = map->stripe_len;
5974
5975         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5976                 length = div_u64(length, map->num_stripes / map->sub_stripes);
5977         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5978                 length = div_u64(length, map->num_stripes);
5979         else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5980                 length = div_u64(length, nr_data_stripes(map));
5981                 rmap_len = map->stripe_len * nr_data_stripes(map);
5982         }
5983
5984         buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
5985         BUG_ON(!buf); /* -ENOMEM */
5986
5987         for (i = 0; i < map->num_stripes; i++) {
5988                 if (map->stripes[i].physical > physical ||
5989                     map->stripes[i].physical + length <= physical)
5990                         continue;
5991
5992                 stripe_nr = physical - map->stripes[i].physical;
5993                 stripe_nr = div64_u64(stripe_nr, map->stripe_len);
5994
5995                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5996                         stripe_nr = stripe_nr * map->num_stripes + i;
5997                         stripe_nr = div_u64(stripe_nr, map->sub_stripes);
5998                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5999                         stripe_nr = stripe_nr * map->num_stripes + i;
6000                 } /* else if RAID[56], multiply by nr_data_stripes().
6001                    * Alternatively, just use rmap_len below instead of
6002                    * map->stripe_len */
6003
6004                 bytenr = chunk_start + stripe_nr * rmap_len;
6005                 WARN_ON(nr >= map->num_stripes);
6006                 for (j = 0; j < nr; j++) {
6007                         if (buf[j] == bytenr)
6008                                 break;
6009                 }
6010                 if (j == nr) {
6011                         WARN_ON(nr >= map->num_stripes);
6012                         buf[nr++] = bytenr;
6013                 }
6014         }
6015
6016         *logical = buf;
6017         *naddrs = nr;
6018         *stripe_len = rmap_len;
6019
6020         free_extent_map(em);
6021         return 0;
6022 }
6023
6024 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6025 {
6026         bio->bi_private = bbio->private;
6027         bio->bi_end_io = bbio->end_io;
6028         bio_endio(bio);
6029
6030         btrfs_put_bbio(bbio);
6031 }
6032
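/*
 * Completion callback for each stripe bio submitted through btrfs_map_bio().
 * Errors are counted in the btrfs_bio and reflected in the per-device stats;
 * when the last stripe finishes, the original bio is completed and only
 * fails if more stripes erred than the profile tolerates (max_errors).
 */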
6033 static void btrfs_end_bio(struct bio *bio)
6034 {
6035         struct btrfs_bio *bbio = bio->bi_private;
6036         int is_orig_bio = 0;
6037
6038         if (bio->bi_status) {
6039                 atomic_inc(&bbio->error);
6040                 if (bio->bi_status == BLK_STS_IOERR ||
6041                     bio->bi_status == BLK_STS_TARGET) {
6042                         unsigned int stripe_index =
6043                                 btrfs_io_bio(bio)->stripe_index;
6044                         struct btrfs_device *dev;
6045
6046                         BUG_ON(stripe_index >= bbio->num_stripes);
6047                         dev = bbio->stripes[stripe_index].dev;
6048                         if (dev->bdev) {
6049                                 if (bio_op(bio) == REQ_OP_WRITE)
6050                                         btrfs_dev_stat_inc_and_print(dev,
6051                                                 BTRFS_DEV_STAT_WRITE_ERRS);
6052                                 else if (!(bio->bi_opf & REQ_RAHEAD))
6053                                         btrfs_dev_stat_inc_and_print(dev,
6054                                                 BTRFS_DEV_STAT_READ_ERRS);
6055                                 if (bio->bi_opf & REQ_PREFLUSH)
6056                                         btrfs_dev_stat_inc_and_print(dev,
6057                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
6058                         }
6059                 }
6060         }
6061
6062         if (bio == bbio->orig_bio)
6063                 is_orig_bio = 1;
6064
6065         btrfs_bio_counter_dec(bbio->fs_info);
6066
6067         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6068                 if (!is_orig_bio) {
6069                         bio_put(bio);
6070                         bio = bbio->orig_bio;
6071                 }
6072
6073                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6074                 /* only send an error to the higher layers if it is
6075                  * beyond the tolerance of the btrfs bio
6076                  */
6077                 if (atomic_read(&bbio->error) > bbio->max_errors) {
6078                         bio->bi_status = BLK_STS_IOERR;
6079                 } else {
6080                         /*
6081                          * this bio is actually up to date, we didn't
6082                          * go over the max number of errors
6083                          */
6084                         bio->bi_status = BLK_STS_OK;
6085                 }
6086
6087                 btrfs_end_bbio(bbio, bio);
6088         } else if (!is_orig_bio) {
6089                 bio_put(bio);
6090         }
6091 }
6092
6093 /*
6094  * see run_scheduled_bios for a description of why bios are collected for
6095  * async submit.
6096  *
6097  * This will add one bio to the pending list for a device and make sure
6098  * the work struct is scheduled.
6099  */
6100 static noinline void btrfs_schedule_bio(struct btrfs_device *device,
6101                                         struct bio *bio)
6102 {
6103         struct btrfs_fs_info *fs_info = device->fs_info;
6104         int should_queue = 1;
6105         struct btrfs_pending_bios *pending_bios;
6106
6107         /* don't bother with additional async steps for reads, right now */
6108         if (bio_op(bio) == REQ_OP_READ) {
6109                 btrfsic_submit_bio(bio);
6110                 return;
6111         }
6112
6113         WARN_ON(bio->bi_next);
6114         bio->bi_next = NULL;
6115
6116         spin_lock(&device->io_lock);
6117         if (op_is_sync(bio->bi_opf))
6118                 pending_bios = &device->pending_sync_bios;
6119         else
6120                 pending_bios = &device->pending_bios;
6121
6122         if (pending_bios->tail)
6123                 pending_bios->tail->bi_next = bio;
6124
6125         pending_bios->tail = bio;
6126         if (!pending_bios->head)
6127                 pending_bios->head = bio;
6128         if (device->running_pending)
6129                 should_queue = 0;
6130
6131         spin_unlock(&device->io_lock);
6132
6133         if (should_queue)
6134                 btrfs_queue_work(fs_info->submit_workers, &device->work);
6135 }
6136
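/*
 * Point @bio at one stripe (device and physical sector) of @bbio and submit
 * it, either directly or via btrfs_schedule_bio() when @async is set.
 */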
6137 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6138                               u64 physical, int dev_nr, int async)
6139 {
6140         struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
6141         struct btrfs_fs_info *fs_info = bbio->fs_info;
6142
6143         bio->bi_private = bbio;
6144         btrfs_io_bio(bio)->stripe_index = dev_nr;
6145         bio->bi_end_io = btrfs_end_bio;
6146         bio->bi_iter.bi_sector = physical >> 9;
6147         btrfs_debug_in_rcu(fs_info,
6148         "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6149                 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6150                 (u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
6151                 bio->bi_iter.bi_size);
6152         bio_set_dev(bio, dev->bdev);
6153
6154         btrfs_bio_counter_inc_noblocked(fs_info);
6155
6156         if (async)
6157                 btrfs_schedule_bio(dev, bio);
6158         else
6159                 btrfsic_submit_bio(bio);
6160 }
6161
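/*
 * Account a stripe that could not be submitted at all (missing or read-only
 * device).  If it was the last outstanding stripe, complete the original
 * bio, failing it only when the error count exceeds max_errors.
 */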
6162 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6163 {
6164         atomic_inc(&bbio->error);
6165         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6166                 /* Should be the original bio. */
6167                 WARN_ON(bio != bbio->orig_bio);
6168
6169                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6170                 bio->bi_iter.bi_sector = logical >> 9;
6171                 if (atomic_read(&bbio->error) > bbio->max_errors)
6172                         bio->bi_status = BLK_STS_IOERR;
6173                 else
6174                         bio->bi_status = BLK_STS_OK;
6175                 btrfs_end_bbio(bbio, bio);
6176         }
6177 }
6178
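/*
 * Map @bio's logical range and submit it to the underlying devices.  RAID5/6
 * full-stripe writes and recovery reads are handed off to the raid56 code;
 * everything else is cloned and submitted once per stripe.
 */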
6179 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6180                            int mirror_num, int async_submit)
6181 {
6182         struct btrfs_device *dev;
6183         struct bio *first_bio = bio;
6184         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6185         u64 length = 0;
6186         u64 map_length;
6187         int ret;
6188         int dev_nr;
6189         int total_devs;
6190         struct btrfs_bio *bbio = NULL;
6191
6192         length = bio->bi_iter.bi_size;
6193         map_length = length;
6194
6195         btrfs_bio_counter_inc_blocked(fs_info);
6196         ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6197                                 &map_length, &bbio, mirror_num, 1);
6198         if (ret) {
6199                 btrfs_bio_counter_dec(fs_info);
6200                 return errno_to_blk_status(ret);
6201         }
6202
6203         total_devs = bbio->num_stripes;
6204         bbio->orig_bio = first_bio;
6205         bbio->private = first_bio->bi_private;
6206         bbio->end_io = first_bio->bi_end_io;
6207         bbio->fs_info = fs_info;
6208         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6209
6210         if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6211             ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6212                 /* In this case, map_length has been set to the length of
6213                    a single stripe; not the whole write */
6214                 if (bio_op(bio) == REQ_OP_WRITE) {
6215                         ret = raid56_parity_write(fs_info, bio, bbio,
6216                                                   map_length);
6217                 } else {
6218                         ret = raid56_parity_recover(fs_info, bio, bbio,
6219                                                     map_length, mirror_num, 1);
6220                 }
6221
6222                 btrfs_bio_counter_dec(fs_info);
6223                 return errno_to_blk_status(ret);
6224         }
6225
6226         if (map_length < length) {
6227                 btrfs_crit(fs_info,
6228                            "mapping failed logical %llu bio len %llu len %llu",
6229                            logical, length, map_length);
6230                 BUG();
6231         }
6232
6233         for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6234                 dev = bbio->stripes[dev_nr].dev;
6235                 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6236                                                    &dev->dev_state) ||
6237                     (bio_op(first_bio) == REQ_OP_WRITE &&
6238                     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6239                         bbio_error(bbio, first_bio, logical);
6240                         continue;
6241                 }
6242
6243                 if (dev_nr < total_devs - 1)
6244                         bio = btrfs_bio_clone(first_bio);
6245                 else
6246                         bio = first_bio;
6247
6248                 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
6249                                   dev_nr, async_submit);
6250         }
6251         btrfs_bio_counter_dec(fs_info);
6252         return BLK_STS_OK;
6253 }
6254
6255 /*
6256  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6257  * return NULL.
6258  *
6259  * If devid and uuid are both specified, the match must be exact, otherwise
6260  * only devid is used.
6261  *
6262  * If @seed is true, traverse through the seed devices.
6263  */
6264 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6265                                         u64 devid, u8 *uuid, u8 *fsid,
6266                                         bool seed)
6267 {
6268         struct btrfs_device *device;
6269
6270         while (fs_devices) {
6271                 if (!fsid ||
6272                     !memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) {
6273                         list_for_each_entry(device, &fs_devices->devices,
6274                                             dev_list) {
6275                                 if (device->devid == devid &&
6276                                     (!uuid || memcmp(device->uuid, uuid,
6277                                                      BTRFS_UUID_SIZE) == 0))
6278                                         return device;
6279                         }
6280                 }
6281                 if (seed)
6282                         fs_devices = fs_devices->seed;
6283                 else
6284                         return NULL;
6285         }
6286         return NULL;
6287 }
6288
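/*
 * Allocate a placeholder btrfs_device for a devid that is referenced by the
 * metadata but not present, and add it to @fs_devices marked as missing.
 */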
6289 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6290                                             u64 devid, u8 *dev_uuid)
6291 {
6292         struct btrfs_device *device;
6293         unsigned int nofs_flag;
6294
6295         /*
6296          * We call this under the chunk_mutex, so we want to use NOFS for this
6297          * allocation, however we don't want to change btrfs_alloc_device() to
6298          * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6299          * places.
6300          */
6301         nofs_flag = memalloc_nofs_save();
6302         device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6303         memalloc_nofs_restore(nofs_flag);
6304         if (IS_ERR(device))
6305                 return device;
6306
6307         list_add(&device->dev_list, &fs_devices->devices);
6308         device->fs_devices = fs_devices;
6309         fs_devices->num_devices++;
6310
6311         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6312         fs_devices->missing_devices++;
6313
6314         return device;
6315 }
6316
6317 /**
6318  * btrfs_alloc_device - allocate struct btrfs_device
6319  * @fs_info:    used only for generating a new devid, can be NULL if
6320  *              devid is provided (i.e. @devid != NULL).
6321  * @devid:      a pointer to devid for this device.  If NULL a new devid
6322  *              is generated.
6323  * @uuid:       a pointer to UUID for this device.  If NULL a new UUID
6324  *              is generated.
6325  *
6326  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6327  * on error.  Returned struct is not linked onto any lists and must be
6328  * destroyed with btrfs_free_device.
6329  */
6330 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6331                                         const u64 *devid,
6332                                         const u8 *uuid)
6333 {
6334         struct btrfs_device *dev;
6335         u64 tmp;
6336
6337         if (WARN_ON(!devid && !fs_info))
6338                 return ERR_PTR(-EINVAL);
6339
6340         dev = __alloc_device();
6341         if (IS_ERR(dev))
6342                 return dev;
6343
6344         if (devid)
6345                 tmp = *devid;
6346         else {
6347                 int ret;
6348
6349                 ret = find_next_devid(fs_info, &tmp);
6350                 if (ret) {
6351                         btrfs_free_device(dev);
6352                         return ERR_PTR(ret);
6353                 }
6354         }
6355         dev->devid = tmp;
6356
6357         if (uuid)
6358                 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6359         else
6360                 generate_random_uuid(dev->uuid);
6361
6362         btrfs_init_work(&dev->work, btrfs_submit_helper,
6363                         pending_bios_fn, NULL, NULL);
6364
6365         return dev;
6366 }
6367
6368 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6369                                         u64 devid, u8 *uuid, bool error)
6370 {
6371         if (error)
6372                 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6373                               devid, uuid);
6374         else
6375                 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6376                               devid, uuid);
6377 }
6378
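/*
 * Read one chunk item (from the chunk tree or the superblock sys_array) and
 * insert the corresponding mapping into the mapping tree.  A stripe device
 * that cannot be found is an error unless the filesystem is mounted with
 * -o degraded, in which case a missing placeholder device is created.
 */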
6379 static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
6380                           struct extent_buffer *leaf,
6381                           struct btrfs_chunk *chunk)
6382 {
6383         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
6384         struct map_lookup *map;
6385         struct extent_map *em;
6386         u64 logical;
6387         u64 length;
6388         u64 devid;
6389         u8 uuid[BTRFS_UUID_SIZE];
6390         int num_stripes;
6391         int ret;
6392         int i;
6393
6394         logical = key->offset;
6395         length = btrfs_chunk_length(leaf, chunk);
6396         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6397
6398         /*
6399          * Only need to verify chunk item if we're reading from sys chunk array,
6400          * as chunk item in tree block is already verified by tree-checker.
6401          */
6402         if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6403                 ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
6404                 if (ret)
6405                         return ret;
6406         }
6407
6408         read_lock(&map_tree->map_tree.lock);
6409         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
6410         read_unlock(&map_tree->map_tree.lock);
6411
6412         /* already mapped? */
6413         if (em && em->start <= logical && em->start + em->len > logical) {
6414                 free_extent_map(em);
6415                 return 0;
6416         } else if (em) {
6417                 free_extent_map(em);
6418         }
6419
6420         em = alloc_extent_map();
6421         if (!em)
6422                 return -ENOMEM;
6423         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6424         if (!map) {
6425                 free_extent_map(em);
6426                 return -ENOMEM;
6427         }
6428
6429         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6430         em->map_lookup = map;
6431         em->start = logical;
6432         em->len = length;
6433         em->orig_start = 0;
6434         em->block_start = 0;
6435         em->block_len = em->len;
6436
6437         map->num_stripes = num_stripes;
6438         map->io_width = btrfs_chunk_io_width(leaf, chunk);
6439         map->io_align = btrfs_chunk_io_align(leaf, chunk);
6440         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6441         map->type = btrfs_chunk_type(leaf, chunk);
6442         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6443         map->verified_stripes = 0;
6444         for (i = 0; i < num_stripes; i++) {
6445                 map->stripes[i].physical =
6446                         btrfs_stripe_offset_nr(leaf, chunk, i);
6447                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6448                 read_extent_buffer(leaf, uuid, (unsigned long)
6449                                    btrfs_stripe_dev_uuid_nr(chunk, i),
6450                                    BTRFS_UUID_SIZE);
6451                 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6452                                                 devid, uuid, NULL, true);
6453                 if (!map->stripes[i].dev &&
6454                     !btrfs_test_opt(fs_info, DEGRADED)) {
6455                         free_extent_map(em);
6456                         btrfs_report_missing_device(fs_info, devid, uuid, true);
6457                         return -ENOENT;
6458                 }
6459                 if (!map->stripes[i].dev) {
6460                         map->stripes[i].dev =
6461                                 add_missing_dev(fs_info->fs_devices, devid,
6462                                                 uuid);
6463                         if (IS_ERR(map->stripes[i].dev)) {
6464                                 free_extent_map(em);
6465                                 btrfs_err(fs_info,
6466                                         "failed to init missing dev %llu: %ld",
6467                                         devid, PTR_ERR(map->stripes[i].dev));
6468                                 return PTR_ERR(map->stripes[i].dev);
6469                         }
6470                         btrfs_report_missing_device(fs_info, devid, uuid, false);
6471                 }
6472                 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6473                                 &(map->stripes[i].dev->dev_state));
6474
6475         }
6476
6477         write_lock(&map_tree->map_tree.lock);
6478         ret = add_extent_mapping(&map_tree->map_tree, em, 0);
6479         write_unlock(&map_tree->map_tree.lock);
6480         if (ret < 0) {
6481                 btrfs_err(fs_info,
6482                           "failed to add chunk map, start=%llu len=%llu: %d",
6483                           em->start, em->len, ret);
6484         }
6485         free_extent_map(em);
6486
6487         return ret;
6488 }
6489
6490 static void fill_device_from_item(struct extent_buffer *leaf,
6491                                  struct btrfs_dev_item *dev_item,
6492                                  struct btrfs_device *device)
6493 {
6494         unsigned long ptr;
6495
6496         device->devid = btrfs_device_id(leaf, dev_item);
6497         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6498         device->total_bytes = device->disk_total_bytes;
6499         device->commit_total_bytes = device->disk_total_bytes;
6500         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6501         device->commit_bytes_used = device->bytes_used;
6502         device->type = btrfs_device_type(leaf, dev_item);
6503         device->io_align = btrfs_device_io_align(leaf, dev_item);
6504         device->io_width = btrfs_device_io_width(leaf, dev_item);
6505         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6506         WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6507         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6508
6509         ptr = btrfs_device_uuid(dev_item);
6510         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6511 }
6512
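/*
 * Find the fs_devices of the seed filesystem with the given @fsid, cloning
 * and opening its devices read-only (or creating an empty placeholder when
 * mounted with -o degraded), and chain it onto the sprout's seed list.
 */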
6513 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6514                                                   u8 *fsid)
6515 {
6516         struct btrfs_fs_devices *fs_devices;
6517         int ret;
6518
6519         lockdep_assert_held(&uuid_mutex);
6520         ASSERT(fsid);
6521
6522         fs_devices = fs_info->fs_devices->seed;
6523         while (fs_devices) {
6524                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6525                         return fs_devices;
6526
6527                 fs_devices = fs_devices->seed;
6528         }
6529
6530         fs_devices = find_fsid(fsid);
6531         if (!fs_devices) {
6532                 if (!btrfs_test_opt(fs_info, DEGRADED))
6533                         return ERR_PTR(-ENOENT);
6534
6535                 fs_devices = alloc_fs_devices(fsid);
6536                 if (IS_ERR(fs_devices))
6537                         return fs_devices;
6538
6539                 fs_devices->seeding = 1;
6540                 fs_devices->opened = 1;
6541                 return fs_devices;
6542         }
6543
6544         fs_devices = clone_fs_devices(fs_devices);
6545         if (IS_ERR(fs_devices))
6546                 return fs_devices;
6547
6548         ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6549         if (ret) {
6550                 free_fs_devices(fs_devices);
6551                 fs_devices = ERR_PTR(ret);
6552                 goto out;
6553         }
6554
6555         if (!fs_devices->seeding) {
6556                 close_fs_devices(fs_devices);
6557                 free_fs_devices(fs_devices);
6558                 fs_devices = ERR_PTR(-EINVAL);
6559                 goto out;
6560         }
6561
6562         fs_devices->seed = fs_info->fs_devices->seed;
6563         fs_info->fs_devices->seed = fs_devices;
6564 out:
6565         return fs_devices;
6566 }
6567
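/*
 * Process one device item from the chunk tree: look up the matching
 * btrfs_device (opening the seed devices if the item belongs to a seed
 * filesystem), create a missing placeholder under -o degraded, then fill
 * the device in from the item and account its space.
 */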
6568 static int read_one_dev(struct btrfs_fs_info *fs_info,
6569                         struct extent_buffer *leaf,
6570                         struct btrfs_dev_item *dev_item)
6571 {
6572         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6573         struct btrfs_device *device;
6574         u64 devid;
6575         int ret;
6576         u8 fs_uuid[BTRFS_FSID_SIZE];
6577         u8 dev_uuid[BTRFS_UUID_SIZE];
6578
6579         devid = btrfs_device_id(leaf, dev_item);
6580         read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6581                            BTRFS_UUID_SIZE);
6582         read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6583                            BTRFS_FSID_SIZE);
6584
6585         if (memcmp(fs_uuid, fs_info->fsid, BTRFS_FSID_SIZE)) {
6586                 fs_devices = open_seed_devices(fs_info, fs_uuid);
6587                 if (IS_ERR(fs_devices))
6588                         return PTR_ERR(fs_devices);
6589         }
6590
6591         device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6592                                    fs_uuid, true);
6593         if (!device) {
6594                 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6595                         btrfs_report_missing_device(fs_info, devid,
6596                                                         dev_uuid, true);
6597                         return -ENOENT;
6598                 }
6599
6600                 device = add_missing_dev(fs_devices, devid, dev_uuid);
6601                 if (IS_ERR(device)) {
6602                         btrfs_err(fs_info,
6603                                 "failed to add missing dev %llu: %ld",
6604                                 devid, PTR_ERR(device));
6605                         return PTR_ERR(device);
6606                 }
6607                 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6608         } else {
6609                 if (!device->bdev) {
6610                         if (!btrfs_test_opt(fs_info, DEGRADED)) {
6611                                 btrfs_report_missing_device(fs_info,
6612                                                 devid, dev_uuid, true);
6613                                 return -ENOENT;
6614                         }
6615                         btrfs_report_missing_device(fs_info, devid,
6616                                                         dev_uuid, false);
6617                 }
6618
6619                 if (!device->bdev &&
6620                     !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6621                         /*
6622                          * This happens when a device that was properly set
6623                          * up in the device info lists suddenly goes bad.
6624                          * device->bdev is NULL, so we have to set the
6625                          * BTRFS_DEV_STATE_MISSING bit here.
6626                          */
6627                         device->fs_devices->missing_devices++;
6628                         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6629                 }
6630
6631                 /* Move the device to its own fs_devices */
6632                 if (device->fs_devices != fs_devices) {
6633                         ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6634                                                         &device->dev_state));
6635
6636                         list_move(&device->dev_list, &fs_devices->devices);
6637                         device->fs_devices->num_devices--;
6638                         fs_devices->num_devices++;
6639
6640                         device->fs_devices->missing_devices--;
6641                         fs_devices->missing_devices++;
6642
6643                         device->fs_devices = fs_devices;
6644                 }
6645         }
6646
6647         if (device->fs_devices != fs_info->fs_devices) {
6648                 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6649                 if (device->generation !=
6650                     btrfs_device_generation(leaf, dev_item))
6651                         return -EINVAL;
6652         }
6653
6654         fill_device_from_item(leaf, dev_item, device);
6655         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6656         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6657            !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6658                 device->fs_devices->total_rw_bytes += device->total_bytes;
6659                 atomic64_add(device->total_bytes - device->bytes_used,
6660                                 &fs_info->free_chunk_space);
6661         }
6662         ret = 0;
6663         return ret;
6664 }
6665
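/*
 * Parse the sys_chunk_array embedded in the superblock.  It contains only
 * SYSTEM chunks, which must be mapped before the chunk tree itself can be
 * read.  The array is copied into a dummy extent buffer so the regular
 * chunk item accessors can be used on it.
 */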
6666 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6667 {
6668         struct btrfs_root *root = fs_info->tree_root;
6669         struct btrfs_super_block *super_copy = fs_info->super_copy;
6670         struct extent_buffer *sb;
6671         struct btrfs_disk_key *disk_key;
6672         struct btrfs_chunk *chunk;
6673         u8 *array_ptr;
6674         unsigned long sb_array_offset;
6675         int ret = 0;
6676         u32 num_stripes;
6677         u32 array_size;
6678         u32 len = 0;
6679         u32 cur_offset;
6680         u64 type;
6681         struct btrfs_key key;
6682
6683         ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6684         /*
6685          * This will create an extent buffer of nodesize; the superblock size
6686          * is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6687          * overallocate, but we can keep it as-is since only the first page is used.
6688          */
6689         sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6690         if (IS_ERR(sb))
6691                 return PTR_ERR(sb);
6692         set_extent_buffer_uptodate(sb);
6693         btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6694         /*
6695          * The sb extent buffer is artificial and just used to read the system array.
6696          * set_extent_buffer_uptodate() call does not properly mark all its
6697          * pages up-to-date when the page is larger: extent does not cover the
6698          * whole page and consequently check_page_uptodate does not find all
6699          * the page's extents up-to-date (the hole beyond sb),
6700          * write_extent_buffer then triggers a WARN_ON.
6701          *
6702          * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
6703          * but sb spans only this function. Add an explicit SetPageUptodate call
6704          * to silence the warning, e.g. on PowerPC 64.
6705          */
6706         if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6707                 SetPageUptodate(sb->pages[0]);
6708
6709         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6710         array_size = btrfs_super_sys_array_size(super_copy);
6711
6712         array_ptr = super_copy->sys_chunk_array;
6713         sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6714         cur_offset = 0;
6715
6716         while (cur_offset < array_size) {
6717                 disk_key = (struct btrfs_disk_key *)array_ptr;
6718                 len = sizeof(*disk_key);
6719                 if (cur_offset + len > array_size)
6720                         goto out_short_read;
6721
6722                 btrfs_disk_key_to_cpu(&key, disk_key);
6723
6724                 array_ptr += len;
6725                 sb_array_offset += len;
6726                 cur_offset += len;
6727
6728                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
6729                         chunk = (struct btrfs_chunk *)sb_array_offset;
6730                         /*
6731                          * At least one btrfs_chunk with one stripe must be
6732                          * present, exact stripe count check comes afterwards
6733                          */
6734                         len = btrfs_chunk_item_size(1);
6735                         if (cur_offset + len > array_size)
6736                                 goto out_short_read;
6737
6738                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6739                         if (!num_stripes) {
6740                                 btrfs_err(fs_info,
6741                                         "invalid number of stripes %u in sys_array at offset %u",
6742                                         num_stripes, cur_offset);
6743                                 ret = -EIO;
6744                                 break;
6745                         }
6746
6747                         type = btrfs_chunk_type(sb, chunk);
6748                         if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6749                                 btrfs_err(fs_info,
6750                             "invalid chunk type %llu in sys_array at offset %u",
6751                                         type, cur_offset);
6752                                 ret = -EIO;
6753                                 break;
6754                         }
6755
6756                         len = btrfs_chunk_item_size(num_stripes);
6757                         if (cur_offset + len > array_size)
6758                                 goto out_short_read;
6759
6760                         ret = read_one_chunk(fs_info, &key, sb, chunk);
6761                         if (ret)
6762                                 break;
6763                 } else {
6764                         btrfs_err(fs_info,
6765                             "unexpected item type %u in sys_array at offset %u",
6766                                   (u32)key.type, cur_offset);
6767                         ret = -EIO;
6768                         break;
6769                 }
6770                 array_ptr += len;
6771                 sb_array_offset += len;
6772                 cur_offset += len;
6773         }
6774         clear_extent_buffer_uptodate(sb);
6775         free_extent_buffer_stale(sb);
6776         return ret;
6777
6778 out_short_read:
6779         btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
6780                         len, cur_offset);
6781         clear_extent_buffer_uptodate(sb);
6782         free_extent_buffer_stale(sb);
6783         return -EIO;
6784 }
6785
6786 /*
6787  * Check if all chunks in the fs are OK for read-write degraded mount
6788  *
6789  * If the @failing_dev is specified, it's accounted as missing.
6790  *
6791  * Return true if all chunks meet the minimal RW mount requirements.
6792  * Return false if any chunk doesn't meet the minimal RW mount requirements.
6793  */
6794 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
6795                                         struct btrfs_device *failing_dev)
6796 {
6797         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
6798         struct extent_map *em;
6799         u64 next_start = 0;
6800         bool ret = true;
6801
6802         read_lock(&map_tree->map_tree.lock);
6803         em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1);
6804         read_unlock(&map_tree->map_tree.lock);
6805         /* No chunk at all? Return false anyway */
6806         if (!em) {
6807                 ret = false;
6808                 goto out;
6809         }
6810         while (em) {
6811                 struct map_lookup *map;
6812                 int missing = 0;
6813                 int max_tolerated;
6814                 int i;
6815
6816                 map = em->map_lookup;
6817                 max_tolerated =
6818                         btrfs_get_num_tolerated_disk_barrier_failures(
6819                                         map->type);
6820                 for (i = 0; i < map->num_stripes; i++) {
6821                         struct btrfs_device *dev = map->stripes[i].dev;
6822
6823                         if (!dev || !dev->bdev ||
6824                             test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
6825                             dev->last_flush_error)
6826                                 missing++;
6827                         else if (failing_dev && failing_dev == dev)
6828                                 missing++;
6829                 }
6830                 if (missing > max_tolerated) {
6831                         if (!failing_dev)
6832                                 btrfs_warn(fs_info,
6833         "chunk %llu missing %d devices, max tolerance is %d for writeable mount",
6834                                    em->start, missing, max_tolerated);
6835                         free_extent_map(em);
6836                         ret = false;
6837                         goto out;
6838                 }
6839                 next_start = extent_map_end(em);
6840                 free_extent_map(em);
6841
6842                 read_lock(&map_tree->map_tree.lock);
6843                 em = lookup_extent_mapping(&map_tree->map_tree, next_start,
6844                                            (u64)(-1) - next_start);
6845                 read_unlock(&map_tree->map_tree.lock);
6846         }
6847 out:
6848         return ret;
6849 }
6850
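/*
 * Read the chunk tree at mount time: all DEV_ITEMs first (so every stripe
 * device is known), then all CHUNK_ITEMs, building the logical-to-physical
 * mapping tree and validating the device count and sizes against the
 * superblock.
 */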
6851 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
6852 {
6853         struct btrfs_root *root = fs_info->chunk_root;
6854         struct btrfs_path *path;
6855         struct extent_buffer *leaf;
6856         struct btrfs_key key;
6857         struct btrfs_key found_key;
6858         int ret;
6859         int slot;
6860         u64 total_dev = 0;
6861
6862         path = btrfs_alloc_path();
6863         if (!path)
6864                 return -ENOMEM;
6865
6866         /*
6867          * uuid_mutex is needed only if we are mounting a sprout FS,
6868          * otherwise we don't need it.
6869          */
6870         mutex_lock(&uuid_mutex);
6871         mutex_lock(&fs_info->chunk_mutex);
6872
6873         /*
6874          * It is possible for mount and umount to race in such a way that
6875          * we execute this code path, but open_fs_devices failed to clear
6876          * total_rw_bytes. We certainly want it cleared before reading the
6877          * device items, so clear it here.
6878          */
6879         fs_info->fs_devices->total_rw_bytes = 0;
6880
6881         /*
6882          * Read all device items, and then all the chunk items. All
6883          * device items are found before any chunk item (their object id
6884          * is smaller than the lowest possible object id for a chunk
6885          * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
6886          */
6887         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
6888         key.offset = 0;
6889         key.type = 0;
6890         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6891         if (ret < 0)
6892                 goto error;
6893         while (1) {
6894                 leaf = path->nodes[0];
6895                 slot = path->slots[0];
6896                 if (slot >= btrfs_header_nritems(leaf)) {
6897                         ret = btrfs_next_leaf(root, path);
6898                         if (ret == 0)
6899                                 continue;
6900                         if (ret < 0)
6901                                 goto error;
6902                         break;
6903                 }
6904                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6905                 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
6906                         struct btrfs_dev_item *dev_item;
6907                         dev_item = btrfs_item_ptr(leaf, slot,
6908                                                   struct btrfs_dev_item);
6909                         ret = read_one_dev(fs_info, leaf, dev_item);
6910                         if (ret)
6911                                 goto error;
6912                         total_dev++;
6913                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6914                         struct btrfs_chunk *chunk;
6915                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
6916                         ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
6917                         if (ret)
6918                                 goto error;
6919                 }
6920                 path->slots[0]++;
6921         }
6922
6923         /*
6924          * After loading chunk tree, we've got all device information,
6925          * do another round of validation checks.
6926          */
6927         if (total_dev != fs_info->fs_devices->total_devices) {
6928                 btrfs_warn(fs_info,
6929 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
6930                           btrfs_super_num_devices(fs_info->super_copy),
6931                           total_dev);
6932                 fs_info->fs_devices->total_devices = total_dev;
6933                 btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
6934         }
6935         if (btrfs_super_total_bytes(fs_info->super_copy) <
6936             fs_info->fs_devices->total_rw_bytes) {
6937                 btrfs_err(fs_info,
6938         "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
6939                           btrfs_super_total_bytes(fs_info->super_copy),
6940                           fs_info->fs_devices->total_rw_bytes);
6941                 ret = -EINVAL;
6942                 goto error;
6943         }
6944         ret = 0;
6945 error:
6946         mutex_unlock(&fs_info->chunk_mutex);
6947         mutex_unlock(&uuid_mutex);
6948
6949         btrfs_free_path(path);
6950         return ret;
6951 }
6952
6953 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
6954 {
6955         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6956         struct btrfs_device *device;
6957
6958         while (fs_devices) {
6959                 mutex_lock(&fs_devices->device_list_mutex);
6960                 list_for_each_entry(device, &fs_devices->devices, dev_list)
6961                         device->fs_info = fs_info;
6962                 mutex_unlock(&fs_devices->device_list_mutex);
6963
6964                 fs_devices = fs_devices->seed;
6965         }
6966 }
6967
6968 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
6969 {
6970         int i;
6971
6972         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
6973                 btrfs_dev_stat_reset(dev, i);
6974 }
6975
6976 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
6977 {
6978         struct btrfs_key key;
6979         struct btrfs_key found_key;
6980         struct btrfs_root *dev_root = fs_info->dev_root;
6981         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6982         struct extent_buffer *eb;
6983         int slot;
6984         int ret = 0;
6985         struct btrfs_device *device;
6986         struct btrfs_path *path = NULL;
6987         int i;
6988
6989         path = btrfs_alloc_path();
6990         if (!path) {
6991                 ret = -ENOMEM;
6992                 goto out;
6993         }
6994
6995         mutex_lock(&fs_devices->device_list_mutex);
6996         list_for_each_entry(device, &fs_devices->devices, dev_list) {
6997                 int item_size;
6998                 struct btrfs_dev_stats_item *ptr;
6999
7000                 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7001                 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7002                 key.offset = device->devid;
7003                 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
7004                 if (ret) {
7005                         __btrfs_reset_dev_stats(device);
7006                         device->dev_stats_valid = 1;
7007                         btrfs_release_path(path);
7008                         continue;
7009                 }
7010                 slot = path->slots[0];
7011                 eb = path->nodes[0];
7012                 btrfs_item_key_to_cpu(eb, &found_key, slot);
7013                 item_size = btrfs_item_size_nr(eb, slot);
7014
7015                 ptr = btrfs_item_ptr(eb, slot,
7016                                      struct btrfs_dev_stats_item);
7017
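                /*
                 * A dev_stats item written by an older kernel may hold fewer
                 * than BTRFS_DEV_STAT_VALUES_MAX counters; read only the
                 * values the on-disk item actually contains and reset the
                 * rest below.
                 */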
7018                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7019                         if (item_size >= (1 + i) * sizeof(__le64))
7020                                 btrfs_dev_stat_set(device, i,
7021                                         btrfs_dev_stats_value(eb, ptr, i));
7022                         else
7023                                 btrfs_dev_stat_reset(device, i);
7024                 }
7025
7026                 device->dev_stats_valid = 1;
7027                 btrfs_dev_stat_print_on_load(device);
7028                 btrfs_release_path(path);
7029         }
7030         mutex_unlock(&fs_devices->device_list_mutex);
7031
7032 out:
7033         btrfs_free_path(path);
7034         return ret < 0 ? ret : 0;
7035 }
7036
7037 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7038                                 struct btrfs_device *device)
7039 {
7040         struct btrfs_fs_info *fs_info = trans->fs_info;
7041         struct btrfs_root *dev_root = fs_info->dev_root;
7042         struct btrfs_path *path;
7043         struct btrfs_key key;
7044         struct extent_buffer *eb;
7045         struct btrfs_dev_stats_item *ptr;
7046         int ret;
7047         int i;
7048
7049         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7050         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7051         key.offset = device->devid;
7052
7053         path = btrfs_alloc_path();
7054         if (!path)
7055                 return -ENOMEM;
7056         ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7057         if (ret < 0) {
7058                 btrfs_warn_in_rcu(fs_info,
7059                         "error %d while searching for dev_stats item for device %s",
7060                               ret, rcu_str_deref(device->name));
7061                 goto out;
7062         }
7063
7064         if (ret == 0 &&
7065             btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7066                 /* need to delete the old one and insert a new one */
7067                 ret = btrfs_del_item(trans, dev_root, path);
7068                 if (ret != 0) {
7069                         btrfs_warn_in_rcu(fs_info,
7070                                 "delete too small dev_stats item for device %s failed %d",
7071                                       rcu_str_deref(device->name), ret);
7072                         goto out;
7073                 }
7074                 ret = 1;
7075         }
7076
7077         if (ret == 1) {
7078                 /* need to insert a new item */
7079                 btrfs_release_path(path);
7080                 ret = btrfs_insert_empty_item(trans, dev_root, path,
7081                                               &key, sizeof(*ptr));
7082                 if (ret < 0) {
7083                         btrfs_warn_in_rcu(fs_info,
7084                                 "insert dev_stats item for device %s failed %d",
7085                                 rcu_str_deref(device->name), ret);
7086                         goto out;
7087                 }
7088         }
7089
7090         eb = path->nodes[0];
7091         ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7092         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7093                 btrfs_set_dev_stats_value(eb, ptr, i,
7094                                           btrfs_dev_stat_read(device, i));
7095         btrfs_mark_buffer_dirty(eb);
7096
7097 out:
7098         btrfs_free_path(path);
7099         return ret;
7100 }
7101
7102 /*
7103  * Called from commit_transaction(). Writes all changed device stats to disk.
7104  */
7105 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
7106                         struct btrfs_fs_info *fs_info)
7107 {
7108         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7109         struct btrfs_device *device;
7110         int stats_cnt;
7111         int ret = 0;
7112
7113         mutex_lock(&fs_devices->device_list_mutex);
7114         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7115                 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7116                 if (!device->dev_stats_valid || stats_cnt == 0)
7117                         continue;
7118
7119
7120                 /*
7121                  * There is a LOAD-LOAD control dependency between the value of
7122                  * dev_stats_ccnt and updating the on-disk values which requires
7123                  * reading the in-memory counters. Such control dependencies
7124                  * require explicit read memory barriers.
7125                  *
7126                  * This memory barrier pairs with smp_mb__before_atomic in
7127                  * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7128                  * barrier implied by atomic_xchg in
7129                  * btrfs_dev_stat_read_and_reset.
7130                  */
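                /*
                 * Writer-side sketch of the pairing (illustrative only, with
                 * a hypothetical field name "stat_value"; the real inline
                 * helpers do the equivalent):
                 *
                 *	atomic_inc(&dev->stat_value);      <- bump a counter
                 *	smp_mb__before_atomic();           <- order the bump ...
                 *	atomic_inc(&dev->dev_stats_ccnt);  <- ... before publishing
                 *
                 * Once the atomic_read() above observed a non-zero ccnt, the
                 * smp_rmb() below ensures the counter reads done by
                 * update_dev_stat_item() see at least those updates.
                 */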
7131                 smp_rmb();
7132
7133                 ret = update_dev_stat_item(trans, device);
7134                 if (!ret)
7135                         atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7136         }
7137         mutex_unlock(&fs_devices->device_list_mutex);
7138
7139         return ret;
7140 }
7141
7142 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7143 {
7144         btrfs_dev_stat_inc(dev, index);
7145         btrfs_dev_stat_print_on_error(dev);
7146 }
7147
7148 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7149 {
7150         if (!dev->dev_stats_valid)
7151                 return;
7152         btrfs_err_rl_in_rcu(dev->fs_info,
7153                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7154                            rcu_str_deref(dev->name),
7155                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7156                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7157                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7158                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7159                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7160 }
7161
7162 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7163 {
7164         int i;
7165
7166         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7167                 if (btrfs_dev_stat_read(dev, i) != 0)
7168                         break;
7169         if (i == BTRFS_DEV_STAT_VALUES_MAX)
7170                 return; /* all values == 0, suppress message */
7171
7172         btrfs_info_in_rcu(dev->fs_info,
7173                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7174                rcu_str_deref(dev->name),
7175                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7176                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7177                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7178                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7179                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7180 }
7181
7182 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7183                         struct btrfs_ioctl_get_dev_stats *stats)
7184 {
7185         struct btrfs_device *dev;
7186         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7187         int i;
7188
7189         mutex_lock(&fs_devices->device_list_mutex);
7190         dev = btrfs_find_device(fs_info->fs_devices, stats->devid,
7191                                 NULL, NULL, true);
7192         mutex_unlock(&fs_devices->device_list_mutex);
7193
7194         if (!dev) {
7195                 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7196                 return -ENODEV;
7197         } else if (!dev->dev_stats_valid) {
7198                 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7199                 return -ENODEV;
7200         } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7201                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7202                         if (stats->nr_items > i)
7203                                 stats->values[i] =
7204                                         btrfs_dev_stat_read_and_reset(dev, i);
7205                         else
7206                                 btrfs_dev_stat_reset(dev, i);
7207                 }
7208                 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7209                            current->comm, task_pid_nr(current));
7210         } else {
7211                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7212                         if (stats->nr_items > i)
7213                                 stats->values[i] = btrfs_dev_stat_read(dev, i);
7214         }
7215         if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7216                 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7217         return 0;
7218 }
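
/*
 * Illustrative userspace counterpart (a sketch, assuming only the uapi
 * definitions from include/uapi/linux/btrfs.h; error handling trimmed):
 *
 *	struct btrfs_ioctl_get_dev_stats st = {
 *		.devid    = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &st) == 0)
 *		printf("write errs: %llu\n",
 *		       (unsigned long long)st.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 */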
7219
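/*
 * Wipe the btrfs magic from every super block copy on @bdev so the device
 * is no longer detected as btrfs, then tell udev and libblkid (via the
 * device path's timestamps) that the device changed.
 */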
7220 void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
7221 {
7222         struct buffer_head *bh;
7223         struct btrfs_super_block *disk_super;
7224         int copy_num;
7225
7226         if (!bdev)
7227                 return;
7228
7229         for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
7230                 copy_num++) {
7231
7232                 if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
7233                         continue;
7234
7235                 disk_super = (struct btrfs_super_block *)bh->b_data;
7236
7237                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
7238                 set_buffer_dirty(bh);
7239                 sync_dirty_buffer(bh);
7240                 brelse(bh);
7241         }
7242
7243         /* Notify udev that the device has changed */
7244         btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
7245
7246         /* Update ctime/mtime for device path for libblkid */
7247         update_dev_time(device_path);
7248 }
7249
7250 /*
7251  * Update the size of all devices, which is used for writing out the
7252  * super blocks.
7253  */
7254 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
7255 {
7256         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7257         struct btrfs_device *curr, *next;
7258
7259         if (list_empty(&fs_devices->resized_devices))
7260                 return;
7261
7262         mutex_lock(&fs_devices->device_list_mutex);
7263         mutex_lock(&fs_info->chunk_mutex);
7264         list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
7265                                  resized_list) {
7266                 list_del_init(&curr->resized_list);
7267                 curr->commit_total_bytes = curr->disk_total_bytes;
7268         }
7269         mutex_unlock(&fs_info->chunk_mutex);
7270         mutex_unlock(&fs_devices->device_list_mutex);
7271 }
7272
7273 /* Must be invoked during the transaction commit */
7274 void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans)
7275 {
7276         struct btrfs_fs_info *fs_info = trans->fs_info;
7277         struct extent_map *em;
7278         struct map_lookup *map;
7279         struct btrfs_device *dev;
7280         int i;
7281
7282         if (list_empty(&trans->pending_chunks))
7283                 return;
7284
7285         /* In order to kick the device replace finish process */
7286         mutex_lock(&fs_info->chunk_mutex);
7287         list_for_each_entry(em, &trans->pending_chunks, list) {
7288                 map = em->map_lookup;
7289
7290                 for (i = 0; i < map->num_stripes; i++) {
7291                         dev = map->stripes[i].dev;
7292                         dev->commit_bytes_used = dev->bytes_used;
7293                         dev->has_pending_chunks = false;
7294                 }
7295         }
7296         mutex_unlock(&fs_info->chunk_mutex);
7297 }
7298
7299 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
7300 {
7301         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7302         while (fs_devices) {
7303                 fs_devices->fs_info = fs_info;
7304                 fs_devices = fs_devices->seed;
7305         }
7306 }
7307
7308 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
7309 {
7310         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7311         while (fs_devices) {
7312                 fs_devices->fs_info = NULL;
7313                 fs_devices = fs_devices->seed;
7314         }
7315 }
7316
7317 /*
7318  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7319  */
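/*
 * Worked example (illustrative): a 1GiB RAID1 or DUP block group occupies
 * 1GiB * 2 = 2GiB of raw device space, while a 1GiB RAID0/SINGLE block
 * group occupies just 1GiB.
 */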
7320 int btrfs_bg_type_to_factor(u64 flags)
7321 {
7322         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
7323                      BTRFS_BLOCK_GROUP_RAID10))
7324                 return 2;
7325         return 1;
7326 }
7327
7328
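/*
 * Size of the dev extent backing one stripe of a chunk with the given
 * profile.  Worked example (illustrative): a 3GiB RAID5 chunk spread over
 * num_stripes = 4 devices has 4 - 1 = 3 data stripes, so each device holds
 * a div_u64(3GiB, 3) = 1GiB dev extent.
 */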
7329 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
7330 {
7331         int index = btrfs_bg_flags_to_raid_index(type);
7332         int ncopies = btrfs_raid_array[index].ncopies;
7333         int data_stripes;
7334
7335         switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
7336         case BTRFS_BLOCK_GROUP_RAID5:
7337                 data_stripes = num_stripes - 1;
7338                 break;
7339         case BTRFS_BLOCK_GROUP_RAID6:
7340                 data_stripes = num_stripes - 2;
7341                 break;
7342         default:
7343                 data_stripes = num_stripes / ncopies;
7344                 break;
7345         }
7346         return div_u64(chunk_len, data_stripes);
7347 }
7348
7349 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7350                                  u64 chunk_offset, u64 devid,
7351                                  u64 physical_offset, u64 physical_len)
7352 {
7353         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
7354         struct extent_map *em;
7355         struct map_lookup *map;
7356         struct btrfs_device *dev;
7357         u64 stripe_len;
7358         bool found = false;
7359         int ret = 0;
7360         int i;
7361
7362         read_lock(&em_tree->lock);
7363         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7364         read_unlock(&em_tree->lock);
7365
7366         if (!em) {
7367                 btrfs_err(fs_info,
7368 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7369                           physical_offset, devid);
7370                 ret = -EUCLEAN;
7371                 goto out;
7372         }
7373
7374         map = em->map_lookup;
7375         stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7376         if (physical_len != stripe_len) {
7377                 btrfs_err(fs_info,
7378 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7379                           physical_offset, devid, em->start, physical_len,
7380                           stripe_len);
7381                 ret = -EUCLEAN;
7382                 goto out;
7383         }
7384
7385         for (i = 0; i < map->num_stripes; i++) {
7386                 if (map->stripes[i].dev->devid == devid &&
7387                     map->stripes[i].physical == physical_offset) {
7388                         found = true;
7389                         if (map->verified_stripes >= map->num_stripes) {
7390                                 btrfs_err(fs_info,
7391                                 "too many dev extents for chunk %llu found",
7392                                           em->start);
7393                                 ret = -EUCLEAN;
7394                                 goto out;
7395                         }
7396                         map->verified_stripes++;
7397                         break;
7398                 }
7399         }
7400         if (!found) {
7401                 btrfs_err(fs_info,
7402         "dev extent physical offset %llu devid %llu has no corresponding chunk",
7403                         physical_offset, devid);
7404                 ret = -EUCLEAN;
7405         }
7406
7407         /* Make sure no dev extent is beyond the device boundary */
7408         dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7409         if (!dev) {
7410                 btrfs_err(fs_info, "failed to find devid %llu", devid);
7411                 ret = -EUCLEAN;
7412                 goto out;
7413         }
7414
7415         /* It's possible this device is a dummy for a seed device */
7416         if (dev->disk_total_bytes == 0) {
7417                 dev = btrfs_find_device(fs_info->fs_devices->seed, devid,
7418                                         NULL, NULL, false);
7419                 if (!dev) {
7420                         btrfs_err(fs_info, "failed to find seed devid %llu",
7421                                   devid);
7422                         ret = -EUCLEAN;
7423                         goto out;
7424                 }
7425         }
7426
7427         if (physical_offset + physical_len > dev->disk_total_bytes) {
7428                 btrfs_err(fs_info,
7429 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7430                           devid, physical_offset, physical_len,
7431                           dev->disk_total_bytes);
7432                 ret = -EUCLEAN;
7433                 goto out;
7434         }
7435 out:
7436         free_extent_map(em);
7437         return ret;
7438 }
7439
7440 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7441 {
7442         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
7443         struct extent_map *em;
7444         struct rb_node *node;
7445         int ret = 0;
7446
7447         read_lock(&em_tree->lock);
7448         for (node = rb_first(&em_tree->map); node; node = rb_next(node)) {
7449                 em = rb_entry(node, struct extent_map, rb_node);
7450                 if (em->map_lookup->num_stripes !=
7451                     em->map_lookup->verified_stripes) {
7452                         btrfs_err(fs_info,
7453                         "chunk %llu has missing dev extent, have %d expect %d",
7454                                   em->start, em->map_lookup->verified_stripes,
7455                                   em->map_lookup->num_stripes);
7456                         ret = -EUCLEAN;
7457                         goto out;
7458                 }
7459         }
7460 out:
7461         read_unlock(&em_tree->lock);
7462         return ret;
7463 }
7464
7465 /*
7466  * Ensure that all dev extents are mapped to correct chunk, otherwise
7467  * later chunk allocation/free would cause unexpected behavior.
7468  *
7469  * NOTE: This will iterate through the whole device tree, which should be
7470  * roughly the same size as the chunk tree.  This slightly increases mount time.
7471  */
7472 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7473 {
7474         struct btrfs_path *path;
7475         struct btrfs_root *root = fs_info->dev_root;
7476         struct btrfs_key key;
7477         u64 prev_devid = 0;
7478         u64 prev_dev_ext_end = 0;
7479         int ret = 0;
7480
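	/*
	 * Regular device ids start at 1, so searching from the key
	 * (1, BTRFS_DEV_EXTENT_KEY, 0) lands us at (or just before) the first
	 * dev extent item in the tree.
	 */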
7481         key.objectid = 1;
7482         key.type = BTRFS_DEV_EXTENT_KEY;
7483         key.offset = 0;
7484
7485         path = btrfs_alloc_path();
7486         if (!path)
7487                 return -ENOMEM;
7488
7489         path->reada = READA_FORWARD;
7490         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7491         if (ret < 0)
7492                 goto out;
7493
7494         if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7495                 ret = btrfs_next_item(root, path);
7496                 if (ret < 0)
7497                         goto out;
7498                 /* No dev extents at all? Not good */
7499                 if (ret > 0) {
7500                         ret = -EUCLEAN;
7501                         goto out;
7502                 }
7503         }
7504         while (1) {
7505                 struct extent_buffer *leaf = path->nodes[0];
7506                 struct btrfs_dev_extent *dext;
7507                 int slot = path->slots[0];
7508                 u64 chunk_offset;
7509                 u64 physical_offset;
7510                 u64 physical_len;
7511                 u64 devid;
7512
7513                 btrfs_item_key_to_cpu(leaf, &key, slot);
7514                 if (key.type != BTRFS_DEV_EXTENT_KEY)
7515                         break;
7516                 devid = key.objectid;
7517                 physical_offset = key.offset;
7518
7519                 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7520                 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7521                 physical_len = btrfs_dev_extent_length(leaf, dext);
7522
7523                 /* Check if this dev extent overlaps with the previous one */
7524                 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7525                         btrfs_err(fs_info,
7526 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7527                                   devid, physical_offset, prev_dev_ext_end);
7528                         ret = -EUCLEAN;
7529                         goto out;
7530                 }
7531
7532                 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7533                                             physical_offset, physical_len);
7534                 if (ret < 0)
7535                         goto out;
7536                 prev_devid = devid;
7537                 prev_dev_ext_end = physical_offset + physical_len;
7538
7539                 ret = btrfs_next_item(root, path);
7540                 if (ret < 0)
7541                         goto out;
7542                 if (ret > 0) {
7543                         ret = 0;
7544                         break;
7545                 }
7546         }
7547
7548         /* Ensure all chunks have corresponding dev extents */
7549         ret = verify_chunk_dev_extent_mapping(fs_info);
7550 out:
7551         btrfs_free_path(path);
7552         return ret;
7553 }