GNU Linux-libre 4.14.290-gnu1
drivers/md/bcache/btree.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This currently is just used as
12  * an LRU but in the future should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection is run, we re-sort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/bcache.txt.
22  */
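/*
 * Illustrative sketch (editorial addition, not part of the driver): the 8 bit
 * counter mentioned above is the bucket generation. Each pointer embedded in
 * a key records the generation the bucket had when the key was created, so a
 * pointer is considered stale once the bucket's generation has moved past it,
 * roughly:
 *
 *	stale = bucket->gen - PTR_GEN(k, i);	(8 bit wraparound arithmetic)
 *
 * "Freeing" a bucket is then just incrementing its generation; keys whose
 * pointers have gone stale are later pruned by garbage collection.
 */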
23
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38
39 #include <trace/events/bcache.h>
40
41 /*
42  * Todo:
43  * register_bcache: Return errors out to userspace correctly
44  *
45  * Writeback: don't undirty key until after a cache flush
46  *
47  * Create an iterator for key pointers
48  *
49  * On btree write error, mark bucket such that it won't be freed from the cache
50  *
51  * Journalling:
52  *   Check for bad keys in replay
53  *   Propagate barriers
54  *   Refcount journal entries in journal_replay
55  *
56  * Garbage collection:
57  *   Finish incremental gc
58  *   Gc should free old UUIDs, data for invalid UUIDs
59  *
60  * Provide a way to list backing device UUIDs we have data cached for, and
61  * probably how long it's been since we've seen them, and a way to invalidate
62  * dirty data for devices that will never be attached again
63  *
64  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65  * that based on that and how much dirty data we have we can keep writeback
66  * from being starved
67  *
68  * Add a tracepoint or somesuch to watch for writeback starvation
69  *
70  * When btree depth > 1 and splitting an interior node, we have to make sure
71  * alloc_bucket() cannot fail. This should be true but is not completely
72  * obvious.
73  *
74  * Plugging?
75  *
76  * If data write is less than hard sector size of ssd, round up offset in open
77  * bucket to the next whole sector
78  *
79  * Superblock needs to be fleshed out for multiple cache devices
80  *
81  * Add a sysfs tunable for the number of writeback IOs in flight
82  *
83  * Add a sysfs tunable for the number of open data buckets
84  *
85  * IO tracking: Can we track when one process is doing io on behalf of another?
86  * IO tracking: Don't use just an average, weigh more recent stuff higher
87  *
88  * Test module load/unload
89  */
90
91 #define MAX_NEED_GC             64
92 #define MAX_SAVE_PRIO           72
93
94 #define PTR_DIRTY_BIT           (((uint64_t) 1 << 36))
95
96 #define PTR_HASH(c, k)                                                  \
97         (((k)->ptr[0] >> (c)->bucket_bits) | PTR_GEN(k, 0))
98
99 #define insert_lock(s, b)       ((b)->level <= (s)->lock)
100
101 /*
102  * These macros are for recursing down the btree - they handle the details of
103  * locking and looking up nodes in the cache for you. They're best treated as
104  * mere syntax when reading code that uses them.
105  *
106  * op->lock determines whether we take a read or a write lock at a given depth.
107  * If you've got a read lock and find that you need a write lock (i.e. you're
108  * going to have to split), set op->lock and return -EINTR; btree_root() will
109  * call you again and you'll have the correct lock.
110  */
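/*
 * A minimal sketch of that convention (editorial addition; the names below
 * are hypothetical, not functions in this file):
 *
 *	if (needs_split && !write_locked) {
 *		op->lock = b->level;	<- request write locks from here down
 *		return -EINTR;		<- btree_root() retries the walk
 *	}
 */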
111
112 /**
113  * btree - recurse down the btree on a specified key
114  * @fn:         function to call, which will be passed the child node
115  * @key:        key to recurse on
116  * @b:          parent btree node
117  * @op:         pointer to struct btree_op
118  */
119 #define btree(fn, key, b, op, ...)                                      \
120 ({                                                                      \
121         int _r, l = (b)->level - 1;                                     \
122         bool _w = l <= (op)->lock;                                      \
123         struct btree *_child = bch_btree_node_get((b)->c, op, key, l,   \
124                                                   _w, b);               \
125         if (!IS_ERR(_child)) {                                          \
126                 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);       \
127                 rw_unlock(_w, _child);                                  \
128         } else                                                          \
129                 _r = PTR_ERR(_child);                                   \
130         _r;                                                             \
131 })
132
133 /**
134  * btree_root - call a function on the root of the btree
135  * @fn:         function to call, which will be passed the root node
136  * @c:          cache set
137  * @op:         pointer to struct btree_op
138  */
139 #define btree_root(fn, c, op, ...)                                      \
140 ({                                                                      \
141         int _r = -EINTR;                                                \
142         do {                                                            \
143                 struct btree *_b = (c)->root;                           \
144                 bool _w = insert_lock(op, _b);                          \
145                 rw_lock(_w, _b, _b->level);                             \
146                 if (_b == (c)->root &&                                  \
147                     _w == insert_lock(op, _b)) {                        \
148                         _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);   \
149                 }                                                       \
150                 rw_unlock(_w, _b);                                      \
151                 bch_cannibalize_unlock(c);                              \
152                 if (_r == -EINTR)                                       \
153                         schedule();                                     \
154         } while (_r == -EINTR);                                         \
155                                                                         \
156         finish_wait(&(c)->btree_cache_wait, &(op)->wait);               \
157         _r;                                                             \
158 })
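/*
 * Typical usage (editorial addition, paraphrasing callers further down in
 * this file): a traversal entry point does something like
 *
 *	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
 *
 * and the recursive helper descends with
 *
 *	ret = btree(map_nodes_recurse, k, b, op, from, fn, flags);
 *
 * so the helper only ever runs against a locked node, and a return of -EINTR
 * makes btree_root() drop everything and restart the walk from the root.
 */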
159
160 static inline struct bset *write_block(struct btree *b)
161 {
162         return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
163 }
164
165 static void bch_btree_init_next(struct btree *b)
166 {
167         /* If not a leaf node, always sort */
168         if (b->level && b->keys.nsets)
169                 bch_btree_sort(&b->keys, &b->c->sort);
170         else
171                 bch_btree_sort_lazy(&b->keys, &b->c->sort);
172
173         if (b->written < btree_blocks(b))
174                 bch_bset_init_next(&b->keys, write_block(b),
175                                    bset_magic(&b->c->sb));
176
177 }
178
179 /* Btree key manipulation */
180
181 void bkey_put(struct cache_set *c, struct bkey *k)
182 {
183         unsigned i;
184
185         for (i = 0; i < KEY_PTRS(k); i++)
186                 if (ptr_available(c, k, i))
187                         atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
188 }
189
190 /* Btree IO */
191
192 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
193 {
194         uint64_t crc = b->key.ptr[0];
195         void *data = (void *) i + 8, *end = bset_bkey_last(i);
196
197         crc = bch_crc64_update(crc, data, end - data);
198         return crc ^ 0xffffffffffffffffULL;
199 }
200
201 void bch_btree_node_read_done(struct btree *b)
202 {
203         const char *err = "bad btree header";
204         struct bset *i = btree_bset_first(b);
205         struct btree_iter *iter;
206
207         iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
208         iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
209         iter->used = 0;
210
211 #ifdef CONFIG_BCACHE_DEBUG
212         iter->b = &b->keys;
213 #endif
214
215         if (!i->seq)
216                 goto err;
217
218         for (;
219              b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
220              i = write_block(b)) {
221                 err = "unsupported bset version";
222                 if (i->version > BCACHE_BSET_VERSION)
223                         goto err;
224
225                 err = "bad btree header";
226                 if (b->written + set_blocks(i, block_bytes(b->c)) >
227                     btree_blocks(b))
228                         goto err;
229
230                 err = "bad magic";
231                 if (i->magic != bset_magic(&b->c->sb))
232                         goto err;
233
234                 err = "bad checksum";
235                 switch (i->version) {
236                 case 0:
237                         if (i->csum != csum_set(i))
238                                 goto err;
239                         break;
240                 case BCACHE_BSET_VERSION:
241                         if (i->csum != btree_csum_set(b, i))
242                                 goto err;
243                         break;
244                 }
245
246                 err = "empty set";
247                 if (i != b->keys.set[0].data && !i->keys)
248                         goto err;
249
250                 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
251
252                 b->written += set_blocks(i, block_bytes(b->c));
253         }
254
255         err = "corrupted btree";
256         for (i = write_block(b);
257              bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
258              i = ((void *) i) + block_bytes(b->c))
259                 if (i->seq == b->keys.set[0].data->seq)
260                         goto err;
261
262         bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
263
264         i = b->keys.set[0].data;
265         err = "short btree key";
266         if (b->keys.set[0].size &&
267             bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
268                 goto err;
269
270         if (b->written < btree_blocks(b))
271                 bch_bset_init_next(&b->keys, write_block(b),
272                                    bset_magic(&b->c->sb));
273 out:
274         mempool_free(iter, b->c->fill_iter);
275         return;
276 err:
277         set_btree_node_io_error(b);
278         bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
279                             err, PTR_BUCKET_NR(b->c, &b->key, 0),
280                             bset_block_offset(b, i), i->keys);
281         goto out;
282 }
283
284 static void btree_node_read_endio(struct bio *bio)
285 {
286         struct closure *cl = bio->bi_private;
287         closure_put(cl);
288 }
289
290 static void bch_btree_node_read(struct btree *b)
291 {
292         uint64_t start_time = local_clock();
293         struct closure cl;
294         struct bio *bio;
295
296         trace_bcache_btree_read(b);
297
298         closure_init_stack(&cl);
299
300         bio = bch_bbio_alloc(b->c);
301         bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
302         bio->bi_end_io  = btree_node_read_endio;
303         bio->bi_private = &cl;
304         bio->bi_opf = REQ_OP_READ | REQ_META;
305
306         bch_bio_map(bio, b->keys.set[0].data);
307
308         bch_submit_bbio(bio, b->c, &b->key, 0);
309         closure_sync(&cl);
310
311         if (bio->bi_status)
312                 set_btree_node_io_error(b);
313
314         bch_bbio_free(bio, b->c);
315
316         if (btree_node_io_error(b))
317                 goto err;
318
319         bch_btree_node_read_done(b);
320         bch_time_stats_update(&b->c->btree_read_time, start_time);
321
322         return;
323 err:
324         bch_cache_set_error(b->c, "io error reading bucket %zu",
325                             PTR_BUCKET_NR(b->c, &b->key, 0));
326 }
327
328 static void btree_complete_write(struct btree *b, struct btree_write *w)
329 {
330         if (w->prio_blocked &&
331             !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
332                 wake_up_allocators(b->c);
333
334         if (w->journal) {
335                 atomic_dec_bug(w->journal);
336                 __closure_wake_up(&b->c->journal.wait);
337         }
338
339         w->prio_blocked = 0;
340         w->journal      = NULL;
341 }
342
343 static void btree_node_write_unlock(struct closure *cl)
344 {
345         struct btree *b = container_of(cl, struct btree, io);
346
347         up(&b->io_mutex);
348 }
349
350 static void __btree_node_write_done(struct closure *cl)
351 {
352         struct btree *b = container_of(cl, struct btree, io);
353         struct btree_write *w = btree_prev_write(b);
354
355         bch_bbio_free(b->bio, b->c);
356         b->bio = NULL;
357         btree_complete_write(b, w);
358
359         if (btree_node_dirty(b))
360                 schedule_delayed_work(&b->work, 30 * HZ);
361
362         closure_return_with_destructor(cl, btree_node_write_unlock);
363 }
364
365 static void btree_node_write_done(struct closure *cl)
366 {
367         struct btree *b = container_of(cl, struct btree, io);
368
369         bio_free_pages(b->bio);
370         __btree_node_write_done(cl);
371 }
372
373 static void btree_node_write_endio(struct bio *bio)
374 {
375         struct closure *cl = bio->bi_private;
376         struct btree *b = container_of(cl, struct btree, io);
377
378         if (bio->bi_status)
379                 set_btree_node_io_error(b);
380
381         bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
382         closure_put(cl);
383 }
384
385 static void do_btree_node_write(struct btree *b)
386 {
387         struct closure *cl = &b->io;
388         struct bset *i = btree_bset_last(b);
389         BKEY_PADDED(key) k;
390
391         i->version      = BCACHE_BSET_VERSION;
392         i->csum         = btree_csum_set(b, i);
393
394         BUG_ON(b->bio);
395         b->bio = bch_bbio_alloc(b->c);
396
397         b->bio->bi_end_io       = btree_node_write_endio;
398         b->bio->bi_private      = cl;
399         b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
400         b->bio->bi_opf          = REQ_OP_WRITE | REQ_META | REQ_FUA;
401         bch_bio_map(b->bio, i);
402
403         /*
404          * If we're appending to a leaf node, we don't technically need FUA -
405          * this write just needs to be persisted before the next journal write,
406          * which will be marked FLUSH|FUA.
407          *
408          * Similarly if we're writing a new btree root - the pointer is going to
409          * be in the next journal entry.
410          *
411          * But if we're writing a new btree node (that isn't a root) or
412          * appending to a non leaf btree node, we need either FUA or a flush
413          * when we write the parent with the new pointer. FUA is cheaper than a
414          * flush, and writes appending to leaf nodes aren't blocking anything so
415          * just make all btree node writes FUA to keep things sane.
416          */
417
418         bkey_copy(&k.key, &b->key);
419         SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
420                        bset_sector_offset(&b->keys, i));
421
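        /*
         * Editorial note (an assumption drawn from the two branches below):
         * if bounce pages can be allocated, the bset is copied into them and
         * the write completes asynchronously via btree_node_write_done(),
         * which frees those pages; otherwise the bio maps the node's own
         * memory, so we closure_sync() before returning, presumably so that
         * memory isn't reused or re-sorted while the write is still in
         * flight.
         */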
422         if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
423                 int j;
424                 struct bio_vec *bv;
425                 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
426
427                 bio_for_each_segment_all(bv, b->bio, j)
428                         memcpy(page_address(bv->bv_page),
429                                base + j * PAGE_SIZE, PAGE_SIZE);
430
431                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
432
433                 continue_at(cl, btree_node_write_done, NULL);
434         } else {
435                 b->bio->bi_vcnt = 0;
436                 bch_bio_map(b->bio, i);
437
438                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
439
440                 closure_sync(cl);
441                 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
442         }
443 }
444
445 void __bch_btree_node_write(struct btree *b, struct closure *parent)
446 {
447         struct bset *i = btree_bset_last(b);
448
449         lockdep_assert_held(&b->write_lock);
450
451         trace_bcache_btree_write(b);
452
453         BUG_ON(current->bio_list);
454         BUG_ON(b->written >= btree_blocks(b));
455         BUG_ON(b->written && !i->keys);
456         BUG_ON(btree_bset_first(b)->seq != i->seq);
457         bch_check_keys(&b->keys, "writing");
458
459         cancel_delayed_work(&b->work);
460
461         /* If caller isn't waiting for write, parent refcount is cache set */
462         down(&b->io_mutex);
463         closure_init(&b->io, parent ?: &b->c->cl);
464
465         clear_bit(BTREE_NODE_dirty,      &b->flags);
466         change_bit(BTREE_NODE_write_idx, &b->flags);
467
468         do_btree_node_write(b);
469
470         atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
471                         &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
472
473         b->written += set_blocks(i, block_bytes(b->c));
474 }
475
476 void bch_btree_node_write(struct btree *b, struct closure *parent)
477 {
478         unsigned nsets = b->keys.nsets;
479
480         lockdep_assert_held(&b->lock);
481
482         __bch_btree_node_write(b, parent);
483
484         /*
485          * do verify if there was more than one set initially (i.e. we did a
486          * sort) and we sorted down to a single set:
487          */
488         if (nsets && !b->keys.nsets)
489                 bch_btree_verify(b);
490
491         bch_btree_init_next(b);
492 }
493
494 static void bch_btree_node_write_sync(struct btree *b)
495 {
496         struct closure cl;
497
498         closure_init_stack(&cl);
499
500         mutex_lock(&b->write_lock);
501         bch_btree_node_write(b, &cl);
502         mutex_unlock(&b->write_lock);
503
504         closure_sync(&cl);
505 }
506
507 static void btree_node_write_work(struct work_struct *w)
508 {
509         struct btree *b = container_of(to_delayed_work(w), struct btree, work);
510
511         mutex_lock(&b->write_lock);
512         if (btree_node_dirty(b))
513                 __bch_btree_node_write(b, NULL);
514         mutex_unlock(&b->write_lock);
515 }
516
517 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
518 {
519         struct bset *i = btree_bset_last(b);
520         struct btree_write *w = btree_current_write(b);
521
522         lockdep_assert_held(&b->write_lock);
523
524         BUG_ON(!b->written);
525         BUG_ON(!i->keys);
526
527         if (!btree_node_dirty(b))
528                 schedule_delayed_work(&b->work, 30 * HZ);
529
530         set_btree_node_dirty(b);
531
532         if (journal_ref) {
533                 if (w->journal &&
534                     journal_pin_cmp(b->c, w->journal, journal_ref)) {
535                         atomic_dec_bug(w->journal);
536                         w->journal = NULL;
537                 }
538
539                 if (!w->journal) {
540                         w->journal = journal_ref;
541                         atomic_inc(w->journal);
542                 }
543         }
544
545         /* Force write if set is too big */
546         if (set_bytes(i) > PAGE_SIZE - 48 &&
547             !current->bio_list)
548                 bch_btree_node_write(b, NULL);
549 }
550
551 /*
552  * Btree in memory cache - allocation/freeing
553  * mca -> memory cache
554  */
555
556 #define mca_reserve(c)  (((c->root && c->root->level)           \
557                           ? c->root->level : 1) * 8 + 16)
558 #define mca_can_free(c)                                         \
559         max_t(int, 0, c->btree_cache_used - mca_reserve(c))
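/*
 * Worked example (editorial addition): with a root at level 2, mca_reserve()
 * comes to 2 * 8 + 16 = 32 nodes; a tree that is just a single leaf still
 * reserves 1 * 8 + 16 = 24. Only nodes beyond that reserve are exposed to the
 * shrinker through mca_can_free().
 */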
560
561 static void mca_data_free(struct btree *b)
562 {
563         BUG_ON(b->io_mutex.count != 1);
564
565         bch_btree_keys_free(&b->keys);
566
567         b->c->btree_cache_used--;
568         list_move(&b->list, &b->c->btree_cache_freed);
569 }
570
571 static void mca_bucket_free(struct btree *b)
572 {
573         BUG_ON(btree_node_dirty(b));
574
575         b->key.ptr[0] = 0;
576         hlist_del_init_rcu(&b->hash);
577         list_move(&b->list, &b->c->btree_cache_freeable);
578 }
579
580 static unsigned btree_order(struct bkey *k)
581 {
582         return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
583 }
584
585 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
586 {
587         if (!bch_btree_keys_alloc(&b->keys,
588                                   max_t(unsigned,
589                                         ilog2(b->c->btree_pages),
590                                         btree_order(k)),
591                                   gfp)) {
592                 b->c->btree_cache_used++;
593                 list_move(&b->list, &b->c->btree_cache);
594         } else {
595                 list_move(&b->list, &b->c->btree_cache_freed);
596         }
597 }
598
599 static struct btree *mca_bucket_alloc(struct cache_set *c,
600                                       struct bkey *k, gfp_t gfp)
601 {
602         struct btree *b = kzalloc(sizeof(struct btree), gfp);
603         if (!b)
604                 return NULL;
605
606         init_rwsem(&b->lock);
607         lockdep_set_novalidate_class(&b->lock);
608         mutex_init(&b->write_lock);
609         lockdep_set_novalidate_class(&b->write_lock);
610         INIT_LIST_HEAD(&b->list);
611         INIT_DELAYED_WORK(&b->work, btree_node_write_work);
612         b->c = c;
613         sema_init(&b->io_mutex, 1);
614
615         mca_data_alloc(b, k, gfp);
616         return b;
617 }
618
619 static int mca_reap(struct btree *b, unsigned min_order, bool flush)
620 {
621         struct closure cl;
622
623         closure_init_stack(&cl);
624         lockdep_assert_held(&b->c->bucket_lock);
625
626         if (!down_write_trylock(&b->lock))
627                 return -ENOMEM;
628
629         BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
630
631         if (b->keys.page_order < min_order)
632                 goto out_unlock;
633
634         if (!flush) {
635                 if (btree_node_dirty(b))
636                         goto out_unlock;
637
638                 if (down_trylock(&b->io_mutex))
639                         goto out_unlock;
640                 up(&b->io_mutex);
641         }
642
643         mutex_lock(&b->write_lock);
644         if (btree_node_dirty(b))
645                 __bch_btree_node_write(b, &cl);
646         mutex_unlock(&b->write_lock);
647
648         closure_sync(&cl);
649
650         /* wait for any in flight btree write */
651         down(&b->io_mutex);
652         up(&b->io_mutex);
653
654         return 0;
655 out_unlock:
656         rw_unlock(true, b);
657         return -ENOMEM;
658 }
659
660 static unsigned long bch_mca_scan(struct shrinker *shrink,
661                                   struct shrink_control *sc)
662 {
663         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
664         struct btree *b, *t;
665         unsigned long i, nr = sc->nr_to_scan;
666         unsigned long freed = 0;
667
668         if (c->shrinker_disabled)
669                 return SHRINK_STOP;
670
671         if (c->btree_cache_alloc_lock)
672                 return SHRINK_STOP;
673
674         /* Return -1 if we can't do anything right now */
675         if (sc->gfp_mask & __GFP_IO)
676                 mutex_lock(&c->bucket_lock);
677         else if (!mutex_trylock(&c->bucket_lock))
678                 return -1;
679
680         /*
681          * It's _really_ critical that we don't free too many btree nodes - we
682          * have to always leave ourselves a reserve. The reserve is how we
683          * guarantee that allocating memory for a new btree node can always
684          * succeed, so that inserting keys into the btree can always succeed and
685          * IO can always make forward progress:
686          */
687         nr /= c->btree_pages;
688         if (nr == 0)
689                 nr = 1;
690         nr = min_t(unsigned long, nr, mca_can_free(c));
691
692         i = 0;
693         list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
694                 if (freed >= nr)
695                         break;
696
697                 if (++i > 3 &&
698                     !mca_reap(b, 0, false)) {
699                         mca_data_free(b);
700                         rw_unlock(true, b);
701                         freed++;
702                 }
703         }
704
705         for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
706                 if (list_empty(&c->btree_cache))
707                         goto out;
708
709                 b = list_first_entry(&c->btree_cache, struct btree, list);
710                 list_rotate_left(&c->btree_cache);
711
712                 if (!b->accessed &&
713                     !mca_reap(b, 0, false)) {
714                         mca_bucket_free(b);
715                         mca_data_free(b);
716                         rw_unlock(true, b);
717                         freed++;
718                 } else
719                         b->accessed = 0;
720         }
721 out:
722         mutex_unlock(&c->bucket_lock);
723         return freed;
724 }
725
726 static unsigned long bch_mca_count(struct shrinker *shrink,
727                                    struct shrink_control *sc)
728 {
729         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
730
731         if (c->shrinker_disabled)
732                 return 0;
733
734         if (c->btree_cache_alloc_lock)
735                 return 0;
736
737         return mca_can_free(c) * c->btree_pages;
738 }
739
740 void bch_btree_cache_free(struct cache_set *c)
741 {
742         struct btree *b;
743         struct closure cl;
744         closure_init_stack(&cl);
745
746         if (c->shrink.list.next)
747                 unregister_shrinker(&c->shrink);
748
749         mutex_lock(&c->bucket_lock);
750
751 #ifdef CONFIG_BCACHE_DEBUG
752         if (c->verify_data)
753                 list_move(&c->verify_data->list, &c->btree_cache);
754
755         free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
756 #endif
757
758         list_splice(&c->btree_cache_freeable,
759                     &c->btree_cache);
760
761         while (!list_empty(&c->btree_cache)) {
762                 b = list_first_entry(&c->btree_cache, struct btree, list);
763
764                 if (btree_node_dirty(b))
765                         btree_complete_write(b, btree_current_write(b));
766                 clear_bit(BTREE_NODE_dirty, &b->flags);
767
768                 mca_data_free(b);
769         }
770
771         while (!list_empty(&c->btree_cache_freed)) {
772                 b = list_first_entry(&c->btree_cache_freed,
773                                      struct btree, list);
774                 list_del(&b->list);
775                 cancel_delayed_work_sync(&b->work);
776                 kfree(b);
777         }
778
779         mutex_unlock(&c->bucket_lock);
780 }
781
782 int bch_btree_cache_alloc(struct cache_set *c)
783 {
784         unsigned i;
785
786         for (i = 0; i < mca_reserve(c); i++)
787                 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
788                         return -ENOMEM;
789
790         list_splice_init(&c->btree_cache,
791                          &c->btree_cache_freeable);
792
793 #ifdef CONFIG_BCACHE_DEBUG
794         mutex_init(&c->verify_lock);
795
796         c->verify_ondisk = (void *)
797                 __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c)));
798
799         c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
800
801         if (c->verify_data &&
802             c->verify_data->keys.set->data)
803                 list_del_init(&c->verify_data->list);
804         else
805                 c->verify_data = NULL;
806 #endif
807
808         c->shrink.count_objects = bch_mca_count;
809         c->shrink.scan_objects = bch_mca_scan;
810         c->shrink.seeks = 4;
811         c->shrink.batch = c->btree_pages * 2;
812
813         if (register_shrinker(&c->shrink))
814                 pr_warn("bcache: %s: could not register shrinker\n",
815                                 __func__);
816
817         return 0;
818 }
819
820 /* Btree in memory cache - hash table */
821
822 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
823 {
824         return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
825 }
826
827 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
828 {
829         struct btree *b;
830
831         rcu_read_lock();
832         hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
833                 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
834                         goto out;
835         b = NULL;
836 out:
837         rcu_read_unlock();
838         return b;
839 }
840
841 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
842 {
843         spin_lock(&c->btree_cannibalize_lock);
844         if (likely(c->btree_cache_alloc_lock == NULL)) {
845                 c->btree_cache_alloc_lock = current;
846         } else if (c->btree_cache_alloc_lock != current) {
847                 if (op)
848                         prepare_to_wait(&c->btree_cache_wait, &op->wait,
849                                         TASK_UNINTERRUPTIBLE);
850                 spin_unlock(&c->btree_cannibalize_lock);
851                 return -EINTR;
852         }
853         spin_unlock(&c->btree_cannibalize_lock);
854
855         return 0;
856 }
857
858 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
859                                      struct bkey *k)
860 {
861         struct btree *b;
862
863         trace_bcache_btree_cache_cannibalize(c);
864
865         if (mca_cannibalize_lock(c, op))
866                 return ERR_PTR(-EINTR);
867
868         list_for_each_entry_reverse(b, &c->btree_cache, list)
869                 if (!mca_reap(b, btree_order(k), false))
870                         return b;
871
872         list_for_each_entry_reverse(b, &c->btree_cache, list)
873                 if (!mca_reap(b, btree_order(k), true))
874                         return b;
875
876         WARN(1, "btree cache cannibalize failed\n");
877         return ERR_PTR(-ENOMEM);
878 }
879
880 /*
881  * We can only have one thread cannibalizing other cached btree nodes at a time,
882  * or we'll deadlock. We use an open-coded mutex to ensure that, which
883  * mca_cannibalize_lock() takes. This means every time we unlock the root of
884  * the btree, we need to release this lock if we have it held.
885  */
886 static void bch_cannibalize_unlock(struct cache_set *c)
887 {
888         spin_lock(&c->btree_cannibalize_lock);
889         if (c->btree_cache_alloc_lock == current) {
890                 c->btree_cache_alloc_lock = NULL;
891                 wake_up(&c->btree_cache_wait);
892         }
893         spin_unlock(&c->btree_cannibalize_lock);
894 }
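/*
 * Editorial note: the lock side is mca_cannibalize_lock() above, which either
 * records the current task in btree_cache_alloc_lock or returns -EINTR after
 * queueing the caller on btree_cache_wait; btree_root() pairs with this by
 * calling bch_cannibalize_unlock() each time it drops the root lock.
 */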
895
896 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
897                                struct bkey *k, int level)
898 {
899         struct btree *b;
900
901         BUG_ON(current->bio_list);
902
903         lockdep_assert_held(&c->bucket_lock);
904
905         if (mca_find(c, k))
906                 return NULL;
907
908         /* btree_node_free() doesn't free memory; it sticks the node on the end of
909          * the list. Check if there are any freed nodes there:
910          */
911         list_for_each_entry(b, &c->btree_cache_freeable, list)
912                 if (!mca_reap(b, btree_order(k), false))
913                         goto out;
914
915         /* We never free struct btree itself, just the memory that holds the on
916          * disk node. Check the freed list before allocating a new one:
917          */
918         list_for_each_entry(b, &c->btree_cache_freed, list)
919                 if (!mca_reap(b, 0, false)) {
920                         mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
921                         if (!b->keys.set[0].data)
922                                 goto err;
923                         else
924                                 goto out;
925                 }
926
927         b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
928         if (!b)
929                 goto err;
930
931         BUG_ON(!down_write_trylock(&b->lock));
932         if (!b->keys.set->data)
933                 goto err;
934 out:
935         BUG_ON(b->io_mutex.count != 1);
936
937         bkey_copy(&b->key, k);
938         list_move(&b->list, &c->btree_cache);
939         hlist_del_init_rcu(&b->hash);
940         hlist_add_head_rcu(&b->hash, mca_hash(c, k));
941
942         lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
943         b->parent       = (void *) ~0UL;
944         b->flags        = 0;
945         b->written      = 0;
946         b->level        = level;
947
948         if (!b->level)
949                 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
950                                     &b->c->expensive_debug_checks);
951         else
952                 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
953                                     &b->c->expensive_debug_checks);
954
955         return b;
956 err:
957         if (b)
958                 rw_unlock(true, b);
959
960         b = mca_cannibalize(c, op, k);
961         if (!IS_ERR(b))
962                 goto out;
963
964         return b;
965 }
966
967 /**
968  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
969  * in from disk if necessary.
970  *
971  * If IO is necessary and running under generic_make_request, returns -EAGAIN.
972  *
973  * The btree node will have either a read or a write lock held, depending on
974  * level and op->lock.
975  */
976 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
977                                  struct bkey *k, int level, bool write,
978                                  struct btree *parent)
979 {
980         int i = 0;
981         struct btree *b;
982
983         BUG_ON(level < 0);
984 retry:
985         b = mca_find(c, k);
986
987         if (!b) {
988                 if (current->bio_list)
989                         return ERR_PTR(-EAGAIN);
990
991                 mutex_lock(&c->bucket_lock);
992                 b = mca_alloc(c, op, k, level);
993                 mutex_unlock(&c->bucket_lock);
994
995                 if (!b)
996                         goto retry;
997                 if (IS_ERR(b))
998                         return b;
999
1000                 bch_btree_node_read(b);
1001
1002                 if (!write)
1003                         downgrade_write(&b->lock);
1004         } else {
1005                 rw_lock(write, b, level);
1006                 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1007                         rw_unlock(write, b);
1008                         goto retry;
1009                 }
1010                 BUG_ON(b->level != level);
1011         }
1012
1013         b->parent = parent;
1014         b->accessed = 1;
1015
1016         for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1017                 prefetch(b->keys.set[i].tree);
1018                 prefetch(b->keys.set[i].data);
1019         }
1020
1021         for (; i <= b->keys.nsets; i++)
1022                 prefetch(b->keys.set[i].data);
1023
1024         if (btree_node_io_error(b)) {
1025                 rw_unlock(write, b);
1026                 return ERR_PTR(-EIO);
1027         }
1028
1029         BUG_ON(!b->written);
1030
1031         return b;
1032 }
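/*
 * Illustrative caller (editorial addition): this is essentially what the
 * btree() macro near the top of the file expands to when descending a level:
 *
 *	struct btree *child = bch_btree_node_get(b->c, op, key, b->level - 1,
 *						 write, b);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	...
 *	rw_unlock(write, child);
 */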
1033
1034 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1035 {
1036         struct btree *b;
1037
1038         mutex_lock(&parent->c->bucket_lock);
1039         b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1040         mutex_unlock(&parent->c->bucket_lock);
1041
1042         if (!IS_ERR_OR_NULL(b)) {
1043                 b->parent = parent;
1044                 bch_btree_node_read(b);
1045                 rw_unlock(true, b);
1046         }
1047 }
1048
1049 /* Btree alloc */
1050
1051 static void btree_node_free(struct btree *b)
1052 {
1053         trace_bcache_btree_node_free(b);
1054
1055         BUG_ON(b == b->c->root);
1056
1057         mutex_lock(&b->write_lock);
1058
1059         if (btree_node_dirty(b))
1060                 btree_complete_write(b, btree_current_write(b));
1061         clear_bit(BTREE_NODE_dirty, &b->flags);
1062
1063         mutex_unlock(&b->write_lock);
1064
1065         cancel_delayed_work(&b->work);
1066
1067         mutex_lock(&b->c->bucket_lock);
1068         bch_bucket_free(b->c, &b->key);
1069         mca_bucket_free(b);
1070         mutex_unlock(&b->c->bucket_lock);
1071 }
1072
1073 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1074                                      int level, bool wait,
1075                                      struct btree *parent)
1076 {
1077         BKEY_PADDED(key) k;
1078         struct btree *b = ERR_PTR(-EAGAIN);
1079
1080         mutex_lock(&c->bucket_lock);
1081 retry:
1082         if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1083                 goto err;
1084
1085         bkey_put(c, &k.key);
1086         SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1087
1088         b = mca_alloc(c, op, &k.key, level);
1089         if (IS_ERR(b))
1090                 goto err_free;
1091
1092         if (!b) {
1093                 cache_bug(c,
1094                         "Tried to allocate bucket that was in btree cache");
1095                 goto retry;
1096         }
1097
1098         b->accessed = 1;
1099         b->parent = parent;
1100         bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1101
1102         mutex_unlock(&c->bucket_lock);
1103
1104         trace_bcache_btree_node_alloc(b);
1105         return b;
1106 err_free:
1107         bch_bucket_free(c, &k.key);
1108 err:
1109         mutex_unlock(&c->bucket_lock);
1110
1111         trace_bcache_btree_node_alloc_fail(c);
1112         return b;
1113 }
1114
1115 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1116                                           struct btree_op *op, int level,
1117                                           struct btree *parent)
1118 {
1119         return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1120 }
1121
1122 static struct btree *btree_node_alloc_replacement(struct btree *b,
1123                                                   struct btree_op *op)
1124 {
1125         struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1126         if (!IS_ERR_OR_NULL(n)) {
1127                 mutex_lock(&n->write_lock);
1128                 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1129                 bkey_copy_key(&n->key, &b->key);
1130                 mutex_unlock(&n->write_lock);
1131         }
1132
1133         return n;
1134 }
1135
1136 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1137 {
1138         unsigned i;
1139
1140         mutex_lock(&b->c->bucket_lock);
1141
1142         atomic_inc(&b->c->prio_blocked);
1143
1144         bkey_copy(k, &b->key);
1145         bkey_copy_key(k, &ZERO_KEY);
1146
1147         for (i = 0; i < KEY_PTRS(k); i++)
1148                 SET_PTR_GEN(k, i,
1149                             bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1150                                         PTR_BUCKET(b->c, &b->key, i)));
1151
1152         mutex_unlock(&b->c->bucket_lock);
1153 }
1154
1155 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1156 {
1157         struct cache_set *c = b->c;
1158         struct cache *ca;
1159         unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
1160
1161         mutex_lock(&c->bucket_lock);
1162
1163         for_each_cache(ca, c, i)
1164                 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1165                         if (op)
1166                                 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1167                                                 TASK_UNINTERRUPTIBLE);
1168                         mutex_unlock(&c->bucket_lock);
1169                         return -EINTR;
1170                 }
1171
1172         mutex_unlock(&c->bucket_lock);
1173
1174         return mca_cannibalize_lock(b->c, op);
1175 }
1176
1177 /* Garbage collection */
1178
1179 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1180                                     struct bkey *k)
1181 {
1182         uint8_t stale = 0;
1183         unsigned i;
1184         struct bucket *g;
1185
1186         /*
1187          * ptr_invalid() can't return true for the keys that mark btree nodes as
1188          * freed, but since ptr_bad() returns true we'll never actually use them
1189  * for anything and thus we don't want to mark their pointers here
1190          */
1191         if (!bkey_cmp(k, &ZERO_KEY))
1192                 return stale;
1193
1194         for (i = 0; i < KEY_PTRS(k); i++) {
1195                 if (!ptr_available(c, k, i))
1196                         continue;
1197
1198                 g = PTR_BUCKET(c, k, i);
1199
1200                 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1201                         g->last_gc = PTR_GEN(k, i);
1202
1203                 if (ptr_stale(c, k, i)) {
1204                         stale = max(stale, ptr_stale(c, k, i));
1205                         continue;
1206                 }
1207
1208                 cache_bug_on(GC_MARK(g) &&
1209                              (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1210                              c, "inconsistent ptrs: mark = %llu, level = %i",
1211                              GC_MARK(g), level);
1212
1213                 if (level)
1214                         SET_GC_MARK(g, GC_MARK_METADATA);
1215                 else if (KEY_DIRTY(k))
1216                         SET_GC_MARK(g, GC_MARK_DIRTY);
1217                 else if (!GC_MARK(g))
1218                         SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1219
1220                 /* guard against overflow */
1221                 SET_GC_SECTORS_USED(g, min_t(unsigned,
1222                                              GC_SECTORS_USED(g) + KEY_SIZE(k),
1223                                              MAX_GC_SECTORS_USED));
1224
1225                 BUG_ON(!GC_SECTORS_USED(g));
1226         }
1227
1228         return stale;
1229 }
1230
1231 #define btree_mark_key(b, k)    __bch_btree_mark_key(b->c, b->level, k)
1232
1233 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1234 {
1235         unsigned i;
1236
1237         for (i = 0; i < KEY_PTRS(k); i++)
1238                 if (ptr_available(c, k, i) &&
1239                     !ptr_stale(c, k, i)) {
1240                         struct bucket *b = PTR_BUCKET(c, k, i);
1241
1242                         b->gen = PTR_GEN(k, i);
1243
1244                         if (level && bkey_cmp(k, &ZERO_KEY))
1245                                 b->prio = BTREE_PRIO;
1246                         else if (!level && b->prio == BTREE_PRIO)
1247                                 b->prio = INITIAL_PRIO;
1248                 }
1249
1250         __bch_btree_mark_key(c, level, k);
1251 }
1252
1253 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1254 {
1255         uint8_t stale = 0;
1256         unsigned keys = 0, good_keys = 0;
1257         struct bkey *k;
1258         struct btree_iter iter;
1259         struct bset_tree *t;
1260
1261         gc->nodes++;
1262
1263         for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1264                 stale = max(stale, btree_mark_key(b, k));
1265                 keys++;
1266
1267                 if (bch_ptr_bad(&b->keys, k))
1268                         continue;
1269
1270                 gc->key_bytes += bkey_u64s(k);
1271                 gc->nkeys++;
1272                 good_keys++;
1273
1274                 gc->data += KEY_SIZE(k);
1275         }
1276
1277         for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1278                 btree_bug_on(t->size &&
1279                              bset_written(&b->keys, t) &&
1280                              bkey_cmp(&b->key, &t->end) < 0,
1281                              b, "found short btree key in gc");
1282
1283         if (b->c->gc_always_rewrite)
1284                 return true;
1285
1286         if (stale > 10)
1287                 return true;
1288
1289         if ((keys - good_keys) * 2 > keys)
1290                 return true;
1291
1292         return false;
1293 }
1294
1295 #define GC_MERGE_NODES  4U
1296
1297 struct gc_merge_info {
1298         struct btree    *b;
1299         unsigned        keys;
1300 };
1301
1302 static int bch_btree_insert_node(struct btree *, struct btree_op *,
1303                                  struct keylist *, atomic_t *, struct bkey *);
1304
1305 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1306                              struct gc_stat *gc, struct gc_merge_info *r)
1307 {
1308         unsigned i, nodes = 0, keys = 0, blocks;
1309         struct btree *new_nodes[GC_MERGE_NODES];
1310         struct keylist keylist;
1311         struct closure cl;
1312         struct bkey *k;
1313
1314         bch_keylist_init(&keylist);
1315
1316         if (btree_check_reserve(b, NULL))
1317                 return 0;
1318
1319         memset(new_nodes, 0, sizeof(new_nodes));
1320         closure_init_stack(&cl);
1321
1322         while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1323                 keys += r[nodes++].keys;
1324
1325         blocks = btree_default_blocks(b->c) * 2 / 3;
1326
1327         if (nodes < 2 ||
1328             __set_blocks(b->keys.set[0].data, keys,
1329                          block_bytes(b->c)) > blocks * (nodes - 1))
1330                 return 0;
1331
1332         for (i = 0; i < nodes; i++) {
1333                 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1334                 if (IS_ERR_OR_NULL(new_nodes[i]))
1335                         goto out_nocoalesce;
1336         }
1337
1338         /*
1339          * We have to check the reserve here, after we've allocated our new
1340          * nodes, to make sure the insert below will succeed - we also check
1341          * before as an optimization to potentially avoid a bunch of expensive
1342          * allocs/sorts
1343          */
1344         if (btree_check_reserve(b, NULL))
1345                 goto out_nocoalesce;
1346
1347         for (i = 0; i < nodes; i++)
1348                 mutex_lock(&new_nodes[i]->write_lock);
1349
1350         for (i = nodes - 1; i > 0; --i) {
1351                 struct bset *n1 = btree_bset_first(new_nodes[i]);
1352                 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1353                 struct bkey *k, *last = NULL;
1354
1355                 keys = 0;
1356
1357                 if (i > 1) {
1358                         for (k = n2->start;
1359                              k < bset_bkey_last(n2);
1360                              k = bkey_next(k)) {
1361                                 if (__set_blocks(n1, n1->keys + keys +
1362                                                  bkey_u64s(k),
1363                                                  block_bytes(b->c)) > blocks)
1364                                         break;
1365
1366                                 last = k;
1367                                 keys += bkey_u64s(k);
1368                         }
1369                 } else {
1370                         /*
1371                          * Last node we're not getting rid of - we're getting
1372                          * rid of the node at r[0]. Have to try and fit all of
1373                          * the remaining keys into this node; we can't ensure
1374                          * they will always fit due to rounding and variable
1375                          * length keys (shouldn't be possible in practice,
1376                          * though)
1377                          */
1378                         if (__set_blocks(n1, n1->keys + n2->keys,
1379                                          block_bytes(b->c)) >
1380                             btree_blocks(new_nodes[i]))
1381                                 goto out_unlock_nocoalesce;
1382
1383                         keys = n2->keys;
1384                         /* Take the key of the node we're getting rid of */
1385                         last = &r->b->key;
1386                 }
1387
1388                 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1389                        btree_blocks(new_nodes[i]));
1390
1391                 if (last)
1392                         bkey_copy_key(&new_nodes[i]->key, last);
1393
1394                 memcpy(bset_bkey_last(n1),
1395                        n2->start,
1396                        (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1397
1398                 n1->keys += keys;
1399                 r[i].keys = n1->keys;
1400
1401                 memmove(n2->start,
1402                         bset_bkey_idx(n2, keys),
1403                         (void *) bset_bkey_last(n2) -
1404                         (void *) bset_bkey_idx(n2, keys));
1405
1406                 n2->keys -= keys;
1407
1408                 if (__bch_keylist_realloc(&keylist,
1409                                           bkey_u64s(&new_nodes[i]->key)))
1410                         goto out_unlock_nocoalesce;
1411
1412                 bch_btree_node_write(new_nodes[i], &cl);
1413                 bch_keylist_add(&keylist, &new_nodes[i]->key);
1414         }
1415
1416         for (i = 0; i < nodes; i++)
1417                 mutex_unlock(&new_nodes[i]->write_lock);
1418
1419         closure_sync(&cl);
1420
1421         /* We emptied out this node */
1422         BUG_ON(btree_bset_first(new_nodes[0])->keys);
1423         btree_node_free(new_nodes[0]);
1424         rw_unlock(true, new_nodes[0]);
1425         new_nodes[0] = NULL;
1426
1427         for (i = 0; i < nodes; i++) {
1428                 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1429                         goto out_nocoalesce;
1430
1431                 make_btree_freeing_key(r[i].b, keylist.top);
1432                 bch_keylist_push(&keylist);
1433         }
1434
1435         bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1436         BUG_ON(!bch_keylist_empty(&keylist));
1437
1438         for (i = 0; i < nodes; i++) {
1439                 btree_node_free(r[i].b);
1440                 rw_unlock(true, r[i].b);
1441
1442                 r[i].b = new_nodes[i];
1443         }
1444
1445         memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1446         r[nodes - 1].b = ERR_PTR(-EINTR);
1447
1448         trace_bcache_btree_gc_coalesce(nodes);
1449         gc->nodes--;
1450
1451         bch_keylist_free(&keylist);
1452
1453         /* Invalidated our iterator */
1454         return -EINTR;
1455
1456 out_unlock_nocoalesce:
1457         for (i = 0; i < nodes; i++)
1458                 mutex_unlock(&new_nodes[i]->write_lock);
1459
1460 out_nocoalesce:
1461         closure_sync(&cl);
1462
1463         while ((k = bch_keylist_pop(&keylist)))
1464                 if (!bkey_cmp(k, &ZERO_KEY))
1465                         atomic_dec(&b->c->prio_blocked);
1466         bch_keylist_free(&keylist);
1467
1468         for (i = 0; i < nodes; i++)
1469                 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1470                         btree_node_free(new_nodes[i]);
1471                         rw_unlock(true, new_nodes[i]);
1472                 }
1473         return 0;
1474 }
1475
1476 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1477                                  struct btree *replace)
1478 {
1479         struct keylist keys;
1480         struct btree *n;
1481
1482         if (btree_check_reserve(b, NULL))
1483                 return 0;
1484
1485         n = btree_node_alloc_replacement(replace, NULL);
1486
1487         /* recheck reserve after allocating replacement node */
1488         if (btree_check_reserve(b, NULL)) {
1489                 btree_node_free(n);
1490                 rw_unlock(true, n);
1491                 return 0;
1492         }
1493
1494         bch_btree_node_write_sync(n);
1495
1496         bch_keylist_init(&keys);
1497         bch_keylist_add(&keys, &n->key);
1498
1499         make_btree_freeing_key(replace, keys.top);
1500         bch_keylist_push(&keys);
1501
1502         bch_btree_insert_node(b, op, &keys, NULL, NULL);
1503         BUG_ON(!bch_keylist_empty(&keys));
1504
1505         btree_node_free(replace);
1506         rw_unlock(true, n);
1507
1508         /* Invalidated our iterator */
1509         return -EINTR;
1510 }
1511
1512 static unsigned btree_gc_count_keys(struct btree *b)
1513 {
1514         struct bkey *k;
1515         struct btree_iter iter;
1516         unsigned ret = 0;
1517
1518         for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1519                 ret += bkey_u64s(k);
1520
1521         return ret;
1522 }
1523
1524 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1525                             struct closure *writes, struct gc_stat *gc)
1526 {
1527         int ret = 0;
1528         bool should_rewrite;
1529         struct bkey *k;
1530         struct btree_iter iter;
1531         struct gc_merge_info r[GC_MERGE_NODES];
1532         struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1533
1534         bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1535
1536         for (i = r; i < r + ARRAY_SIZE(r); i++)
1537                 i->b = ERR_PTR(-EINTR);
1538
1539         while (1) {
1540                 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1541                 if (k) {
1542                         r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1543                                                   true, b);
1544                         if (IS_ERR(r->b)) {
1545                                 ret = PTR_ERR(r->b);
1546                                 break;
1547                         }
1548
1549                         r->keys = btree_gc_count_keys(r->b);
1550
1551                         ret = btree_gc_coalesce(b, op, gc, r);
1552                         if (ret)
1553                                 break;
1554                 }
1555
1556                 if (!last->b)
1557                         break;
1558
1559                 if (!IS_ERR(last->b)) {
1560                         should_rewrite = btree_gc_mark_node(last->b, gc);
1561                         if (should_rewrite) {
1562                                 ret = btree_gc_rewrite_node(b, op, last->b);
1563                                 if (ret)
1564                                         break;
1565                         }
1566
1567                         if (last->b->level) {
1568                                 ret = btree_gc_recurse(last->b, op, writes, gc);
1569                                 if (ret)
1570                                         break;
1571                         }
1572
1573                         bkey_copy_key(&b->c->gc_done, &last->b->key);
1574
1575                         /*
1576                          * Must flush leaf nodes before gc ends, since replace
1577                          * operations aren't journalled
1578                          */
1579                         mutex_lock(&last->b->write_lock);
1580                         if (btree_node_dirty(last->b))
1581                                 bch_btree_node_write(last->b, writes);
1582                         mutex_unlock(&last->b->write_lock);
1583                         rw_unlock(true, last->b);
1584                 }
1585
1586                 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1587                 r->b = NULL;
1588
1589                 if (need_resched()) {
1590                         ret = -EAGAIN;
1591                         break;
1592                 }
1593         }
1594
1595         for (i = r; i < r + ARRAY_SIZE(r); i++)
1596                 if (!IS_ERR_OR_NULL(i->b)) {
1597                         mutex_lock(&i->b->write_lock);
1598                         if (btree_node_dirty(i->b))
1599                                 bch_btree_node_write(i->b, writes);
1600                         mutex_unlock(&i->b->write_lock);
1601                         rw_unlock(true, i->b);
1602                 }
1603
1604         return ret;
1605 }
1606
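     /*
      * Gc for the root: possibly rewrite the root node itself, mark its key,
      * then descend into the rest of the tree via btree_gc_recurse().
      */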
1607 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1608                              struct closure *writes, struct gc_stat *gc)
1609 {
1610         struct btree *n = NULL;
1611         int ret = 0;
1612         bool should_rewrite;
1613
1614         should_rewrite = btree_gc_mark_node(b, gc);
1615         if (should_rewrite) {
1616                 n = btree_node_alloc_replacement(b, NULL);
1617
1618                 if (!IS_ERR_OR_NULL(n)) {
1619                         bch_btree_node_write_sync(n);
1620
1621                         bch_btree_set_root(n);
1622                         btree_node_free(b);
1623                         rw_unlock(true, n);
1624
1625                         return -EINTR;
1626                 }
1627         }
1628
1629         __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1630
1631         if (b->level) {
1632                 ret = btree_gc_recurse(b, op, writes, gc);
1633                 if (ret)
1634                         return ret;
1635         }
1636
1637         bkey_copy_key(&b->c->gc_done, &b->key);
1638
1639         return ret;
1640 }
1641
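     /*
      * Reset per bucket gc state before a pass: remember each bucket's current
      * gen in last_gc and clear the marks and sector counts of unpinned
      * buckets, so the walk can recompute them from the btree.
      */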
1642 static void btree_gc_start(struct cache_set *c)
1643 {
1644         struct cache *ca;
1645         struct bucket *b;
1646         unsigned i;
1647
1648         if (!c->gc_mark_valid)
1649                 return;
1650
1651         mutex_lock(&c->bucket_lock);
1652
1653         c->gc_mark_valid = 0;
1654         c->gc_done = ZERO_KEY;
1655
1656         for_each_cache(ca, c, i)
1657                 for_each_bucket(b, ca) {
1658                         b->last_gc = b->gen;
1659                         if (!atomic_read(&b->pin)) {
1660                                 SET_GC_MARK(b, 0);
1661                                 SET_GC_SECTORS_USED(b, 0);
1662                         }
1663                 }
1664
1665         mutex_unlock(&c->bucket_lock);
1666 }
1667
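     /*
      * After the btree walk: re-mark metadata buckets (uuids, the journal and
      * prio buckets) and buckets that in flight writeback keys point to, then
      * count how many buckets are unused or reclaimable; the return value
      * feeds the in_use statistic in bch_btree_gc().
      */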
1668 static size_t bch_btree_gc_finish(struct cache_set *c)
1669 {
1670         size_t available = 0;
1671         struct bucket *b;
1672         struct cache *ca;
1673         unsigned i;
1674
1675         mutex_lock(&c->bucket_lock);
1676
1677         set_gc_sectors(c);
1678         c->gc_mark_valid = 1;
1679         c->need_gc      = 0;
1680
1681         for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1682                 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1683                             GC_MARK_METADATA);
1684
1685         /* don't reclaim buckets to which writeback keys point */
1686         rcu_read_lock();
1687         for (i = 0; i < c->nr_uuids; i++) {
1688                 struct bcache_device *d = c->devices[i];
1689                 struct cached_dev *dc;
1690                 struct keybuf_key *w, *n;
1691                 unsigned j;
1692
1693                 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1694                         continue;
1695                 dc = container_of(d, struct cached_dev, disk);
1696
1697                 spin_lock(&dc->writeback_keys.lock);
1698                 rbtree_postorder_for_each_entry_safe(w, n,
1699                                         &dc->writeback_keys.keys, node)
1700                         for (j = 0; j < KEY_PTRS(&w->key); j++)
1701                                 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1702                                             GC_MARK_DIRTY);
1703                 spin_unlock(&dc->writeback_keys.lock);
1704         }
1705         rcu_read_unlock();
1706
1707         for_each_cache(ca, c, i) {
1708                 uint64_t *i;
1709
1710                 ca->invalidate_needs_gc = 0;
1711
1712                 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1713                         SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1714
1715                 for (i = ca->prio_buckets;
1716                      i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1717                         SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1718
1719                 for_each_bucket(b, ca) {
1720                         c->need_gc      = max(c->need_gc, bucket_gc_gen(b));
1721
1722                         if (atomic_read(&b->pin))
1723                                 continue;
1724
1725                         BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1726
1727                         if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1728                                 available++;
1729                 }
1730         }
1731
1732         mutex_unlock(&c->bucket_lock);
1733         return available;
1734 }
1735
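     /*
      * One full garbage collection pass: walk the btree from the root,
      * retrying until the walk completes (warning on errors other than
      * -EAGAIN), then update bucket marks, wake the allocators and kick off
      * moving gc.
      */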
1736 static void bch_btree_gc(struct cache_set *c)
1737 {
1738         int ret;
1739         unsigned long available;
1740         struct gc_stat stats;
1741         struct closure writes;
1742         struct btree_op op;
1743         uint64_t start_time = local_clock();
1744
1745         trace_bcache_gc_start(c);
1746
1747         memset(&stats, 0, sizeof(struct gc_stat));
1748         closure_init_stack(&writes);
1749         bch_btree_op_init(&op, SHRT_MAX);
1750
1751         btree_gc_start(c);
1752
1753         do {
1754                 ret = btree_root(gc_root, c, &op, &writes, &stats);
1755                 closure_sync(&writes);
1756                 cond_resched();
1757
1758                 if (ret && ret != -EAGAIN)
1759                         pr_warn("gc failed!");
1760         } while (ret);
1761
1762         available = bch_btree_gc_finish(c);
1763         wake_up_allocators(c);
1764
1765         bch_time_stats_update(&c->btree_gc_time, start_time);
1766
1767         stats.key_bytes *= sizeof(uint64_t);
1768         stats.data      <<= 9;
1769         stats.in_use    = (c->nbuckets - available) * 100 / c->nbuckets;
1770         memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1771
1772         trace_bcache_gc_end(c);
1773
1774         bch_moving_gc(c);
1775 }
1776
1777 static bool gc_should_run(struct cache_set *c)
1778 {
1779         struct cache *ca;
1780         unsigned i;
1781
1782         for_each_cache(ca, c, i)
1783                 if (ca->invalidate_needs_gc)
1784                         return true;
1785
1786         if (atomic_read(&c->sectors_to_gc) < 0)
1787                 return true;
1788
1789         return false;
1790 }
1791
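     /*
      * The gc thread sleeps until a cache sets invalidate_needs_gc or enough
      * data has been written that sectors_to_gc goes negative, then runs a
      * full gc pass.
      */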
1792 static int bch_gc_thread(void *arg)
1793 {
1794         struct cache_set *c = arg;
1795
1796         while (1) {
1797                 wait_event_interruptible(c->gc_wait,
1798                            kthread_should_stop() || gc_should_run(c));
1799
1800                 if (kthread_should_stop())
1801                         break;
1802
1803                 set_gc_sectors(c);
1804                 bch_btree_gc(c);
1805         }
1806
1807         return 0;
1808 }
1809
1810 int bch_gc_thread_start(struct cache_set *c)
1811 {
1812         c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1813         if (IS_ERR(c->gc_thread))
1814                 return PTR_ERR(c->gc_thread);
1815
1816         return 0;
1817 }
1818
1819 /* Initial partial gc */
1820
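     /*
      * Mark every key in this node (and the node's own key) as allocated, then
      * walk interior nodes depth first, prefetching the next child while the
      * previous one is being checked.
      */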
1821 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1822 {
1823         int ret = 0;
1824         struct bkey *k, *p = NULL;
1825         struct btree_iter iter;
1826
1827         for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1828                 bch_initial_mark_key(b->c, b->level, k);
1829
1830         bch_initial_mark_key(b->c, b->level + 1, &b->key);
1831
1832         if (b->level) {
1833                 bch_btree_iter_init(&b->keys, &iter, NULL);
1834
1835                 do {
1836                         k = bch_btree_iter_next_filter(&iter, &b->keys,
1837                                                        bch_ptr_bad);
1838                         if (k)
1839                                 btree_node_prefetch(b, k);
1840
1841                         if (p)
1842                                 ret = btree(check_recurse, p, b, op);
1843
1844                         p = k;
1845                 } while (p && !ret);
1846         }
1847
1848         return ret;
1849 }
1850
1851 int bch_btree_check(struct cache_set *c)
1852 {
1853         struct btree_op op;
1854
1855         bch_btree_op_init(&op, SHRT_MAX);
1856
1857         return btree_root(check_recurse, c, &op);
1858 }
1859
1860 void bch_initial_gc_finish(struct cache_set *c)
1861 {
1862         struct cache *ca;
1863         struct bucket *b;
1864         unsigned i;
1865
1866         bch_btree_gc_finish(c);
1867
1868         mutex_lock(&c->bucket_lock);
1869
1870         /*
1871          * We need to put some unused buckets directly on the prio freelist in
1872          * order to get the allocator thread started - it needs freed buckets in
1873          * order to rewrite the prios and gens, and it needs to rewrite prios
1874          * and gens in order to free buckets.
1875          *
1876          * This is only safe for buckets that have no live data in them, which
1877          * there should always be some of.
1878          */
1879         for_each_cache(ca, c, i) {
1880                 for_each_bucket(b, ca) {
1881                         if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1882                             fifo_full(&ca->free[RESERVE_BTREE]))
1883                                 break;
1884
1885                         if (bch_can_invalidate_bucket(ca, b) &&
1886                             !GC_MARK(b)) {
1887                                 __bch_invalidate_one_bucket(ca, b);
1888                                 if (!fifo_push(&ca->free[RESERVE_PRIO],
1889                                                b - ca->buckets))
1890                                         fifo_push(&ca->free[RESERVE_BTREE],
1891                                                   b - ca->buckets);
1892                         }
1893                 }
1894         }
1895
1896         mutex_unlock(&c->bucket_lock);
1897 }
1898
1899 /* Btree insertion */
1900
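     /*
      * Insert a single key into an in-memory node; returns false if nothing
      * was inserted (e.g. a replace whose original key no longer matches).
      */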
1901 static bool btree_insert_key(struct btree *b, struct bkey *k,
1902                              struct bkey *replace_key)
1903 {
1904         unsigned status;
1905
1906         BUG_ON(bkey_cmp(k, &b->key) > 0);
1907
1908         status = bch_btree_insert_key(&b->keys, k, replace_key);
1909         if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1910                 bch_check_keys(&b->keys, "%u for %s", status,
1911                                replace_key ? "replace" : "insert");
1912
1913                 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1914                                               status);
1915                 return true;
1916         } else
1917                 return false;
1918 }
1919
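     /*
      * How many u64s worth of new keys still fit in this node, reserving room
      * for the extra key created when an insert splits an existing extent.
      */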
1920 static size_t insert_u64s_remaining(struct btree *b)
1921 {
1922         long ret = bch_btree_keys_u64s_remaining(&b->keys);
1923
1924         /*
1925          * Might land in the middle of an existing extent and have to split it
1926          */
1927         if (b->keys.ops->is_extents)
1928                 ret -= KEY_MAX_U64S;
1929
1930         return max(ret, 0L);
1931 }
1932
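     /*
      * Insert as many keys from the keylist as both fit in this node and fall
      * within its range; a key straddling the end of the node is cut at b->key
      * and the remainder is left on the keylist for the next node.
      */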
1933 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1934                                   struct keylist *insert_keys,
1935                                   struct bkey *replace_key)
1936 {
1937         bool ret = false;
1938         int oldsize = bch_count_data(&b->keys);
1939
1940         while (!bch_keylist_empty(insert_keys)) {
1941                 struct bkey *k = insert_keys->keys;
1942
1943                 if (bkey_u64s(k) > insert_u64s_remaining(b))
1944                         break;
1945
1946                 if (bkey_cmp(k, &b->key) <= 0) {
1947                         if (!b->level)
1948                                 bkey_put(b->c, k);
1949
1950                         ret |= btree_insert_key(b, k, replace_key);
1951                         bch_keylist_pop_front(insert_keys);
1952                 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1953                         BKEY_PADDED(key) temp;
1954                         bkey_copy(&temp.key, insert_keys->keys);
1955
1956                         bch_cut_back(&b->key, &temp.key);
1957                         bch_cut_front(&b->key, insert_keys->keys);
1958
1959                         ret |= btree_insert_key(b, &temp.key, replace_key);
1960                         break;
1961                 } else {
1962                         break;
1963                 }
1964         }
1965
1966         if (!ret)
1967                 op->insert_collision = true;
1968
1969         BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1970
1971         BUG_ON(bch_count_data(&b->keys) < oldsize);
1972         return ret;
1973 }
1974
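     /*
      * Replace a full node: if its live keys would still fill more than about
      * 4/5 of a node, split them roughly 3/5 - 2/5 across two new nodes,
      * otherwise compact them into a single replacement. The new nodes' keys
      * are then inserted into the parent, or into a brand new root when the
      * node being split was the root.
      */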
1975 static int btree_split(struct btree *b, struct btree_op *op,
1976                        struct keylist *insert_keys,
1977                        struct bkey *replace_key)
1978 {
1979         bool split;
1980         struct btree *n1, *n2 = NULL, *n3 = NULL;
1981         uint64_t start_time = local_clock();
1982         struct closure cl;
1983         struct keylist parent_keys;
1984
1985         closure_init_stack(&cl);
1986         bch_keylist_init(&parent_keys);
1987
1988         if (btree_check_reserve(b, op)) {
1989                 if (!b->level)
1990                         return -EINTR;
1991                 else
1992                         WARN(1, "insufficient reserve for split\n");
1993         }
1994
1995         n1 = btree_node_alloc_replacement(b, op);
1996         if (IS_ERR(n1))
1997                 goto err;
1998
1999         split = set_blocks(btree_bset_first(n1),
2000                            block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2001
2002         if (split) {
2003                 unsigned keys = 0;
2004
2005                 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2006
2007                 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2008                 if (IS_ERR(n2))
2009                         goto err_free1;
2010
2011                 if (!b->parent) {
2012                         n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2013                         if (IS_ERR(n3))
2014                                 goto err_free2;
2015                 }
2016
2017                 mutex_lock(&n1->write_lock);
2018                 mutex_lock(&n2->write_lock);
2019
2020                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2021
2022                 /*
2023                  * Has to be a linear search because we don't have an auxiliary
2024                  * search tree yet
2025                  */
2026
2027                 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2028                         keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2029                                                         keys));
2030
2031                 bkey_copy_key(&n1->key,
2032                               bset_bkey_idx(btree_bset_first(n1), keys));
2033                 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2034
2035                 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2036                 btree_bset_first(n1)->keys = keys;
2037
2038                 memcpy(btree_bset_first(n2)->start,
2039                        bset_bkey_last(btree_bset_first(n1)),
2040                        btree_bset_first(n2)->keys * sizeof(uint64_t));
2041
2042                 bkey_copy_key(&n2->key, &b->key);
2043
2044                 bch_keylist_add(&parent_keys, &n2->key);
2045                 bch_btree_node_write(n2, &cl);
2046                 mutex_unlock(&n2->write_lock);
2047                 rw_unlock(true, n2);
2048         } else {
2049                 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2050
2051                 mutex_lock(&n1->write_lock);
2052                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2053         }
2054
2055         bch_keylist_add(&parent_keys, &n1->key);
2056         bch_btree_node_write(n1, &cl);
2057         mutex_unlock(&n1->write_lock);
2058
2059         if (n3) {
2060                 /* Depth increases, make a new root */
2061                 mutex_lock(&n3->write_lock);
2062                 bkey_copy_key(&n3->key, &MAX_KEY);
2063                 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2064                 bch_btree_node_write(n3, &cl);
2065                 mutex_unlock(&n3->write_lock);
2066
2067                 closure_sync(&cl);
2068                 bch_btree_set_root(n3);
2069                 rw_unlock(true, n3);
2070         } else if (!b->parent) {
2071                 /* Root filled up but didn't need to be split */
2072                 closure_sync(&cl);
2073                 bch_btree_set_root(n1);
2074         } else {
2075                 /* Split a non root node */
2076                 closure_sync(&cl);
2077                 make_btree_freeing_key(b, parent_keys.top);
2078                 bch_keylist_push(&parent_keys);
2079
2080                 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2081                 BUG_ON(!bch_keylist_empty(&parent_keys));
2082         }
2083
2084         btree_node_free(b);
2085         rw_unlock(true, n1);
2086
2087         bch_time_stats_update(&b->c->btree_split_time, start_time);
2088
2089         return 0;
2090 err_free2:
2091         bkey_put(b->c, &n2->key);
2092         btree_node_free(n2);
2093         rw_unlock(true, n2);
2094 err_free1:
2095         bkey_put(b->c, &n1->key);
2096         btree_node_free(n1);
2097         rw_unlock(true, n1);
2098 err:
2099         WARN(1, "bcache: btree split failed (level %u)", b->level);
2100
2101         if (n3 == ERR_PTR(-EAGAIN) ||
2102             n2 == ERR_PTR(-EAGAIN) ||
2103             n1 == ERR_PTR(-EAGAIN))
2104                 return -EAGAIN;
2105
2106         return -ENOMEM;
2107 }
2108
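     /*
      * Insert keys into a node, splitting it if they don't fit. Splitting
      * requires a write lock on the parent, so if we don't hold enough locks
      * we bail out with -EINTR (or -EAGAIN from the IO submission path, where
      * we must not block) and the caller retries from the root with op->lock
      * raised.
      */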
2109 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2110                                  struct keylist *insert_keys,
2111                                  atomic_t *journal_ref,
2112                                  struct bkey *replace_key)
2113 {
2114         struct closure cl;
2115
2116         BUG_ON(b->level && replace_key);
2117
2118         closure_init_stack(&cl);
2119
2120         mutex_lock(&b->write_lock);
2121
2122         if (write_block(b) != btree_bset_last(b) &&
2123             b->keys.last_set_unwritten)
2124                 bch_btree_init_next(b); /* just wrote a set */
2125
2126         if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2127                 mutex_unlock(&b->write_lock);
2128                 goto split;
2129         }
2130
2131         BUG_ON(write_block(b) != btree_bset_last(b));
2132
2133         if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2134                 if (!b->level)
2135                         bch_btree_leaf_dirty(b, journal_ref);
2136                 else
2137                         bch_btree_node_write(b, &cl);
2138         }
2139
2140         mutex_unlock(&b->write_lock);
2141
2142         /* wait for btree node write if necessary, after unlock */
2143         closure_sync(&cl);
2144
2145         return 0;
2146 split:
2147         if (current->bio_list) {
2148                 op->lock = b->c->root->level + 1;
2149                 return -EAGAIN;
2150         } else if (op->lock <= b->c->root->level) {
2151                 op->lock = b->c->root->level + 1;
2152                 return -EINTR;
2153         } else {
2154                 /* Invalidated all iterators */
2155                 int ret = btree_split(b, op, insert_keys, replace_key);
2156
2157                 if (bch_keylist_empty(insert_keys))
2158                         return 0;
2159                 else if (!ret)
2160                         return -EINTR;
2161                 return ret;
2162         }
2163 }
2164
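     /*
      * Insert a placeholder key with a random pointer flagged PTR_CHECK_DEV,
      * so that a later replace of this key fails if the range was modified in
      * the meantime. If the caller only holds a read lock it is upgraded, and
      * the insert is abandoned if the node changed underneath us.
      */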
2165 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2166                                struct bkey *check_key)
2167 {
2168         int ret = -EINTR;
2169         uint64_t btree_ptr = b->key.ptr[0];
2170         unsigned long seq = b->seq;
2171         struct keylist insert;
2172         bool upgrade = op->lock == -1;
2173
2174         bch_keylist_init(&insert);
2175
2176         if (upgrade) {
2177                 rw_unlock(false, b);
2178                 rw_lock(true, b, b->level);
2179
2180                 if (b->key.ptr[0] != btree_ptr ||
2181                     b->seq != seq + 1) {
2182                         op->lock = b->level;
2183                         goto out;
2184                 }
2185         }
2186
2187         SET_KEY_PTRS(check_key, 1);
2188         get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2189
2190         SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2191
2192         bch_keylist_add(&insert, check_key);
2193
2194         ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2195
2196         BUG_ON(!ret && !bch_keylist_empty(&insert));
2197 out:
2198         if (upgrade)
2199                 downgrade_write(&b->lock);
2200         return ret;
2201 }
2202
2203 struct btree_insert_op {
2204         struct btree_op op;
2205         struct keylist  *keys;
2206         atomic_t        *journal_ref;
2207         struct bkey     *replace_key;
2208 };
2209
2210 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2211 {
2212         struct btree_insert_op *op = container_of(b_op,
2213                                         struct btree_insert_op, op);
2214
2215         int ret = bch_btree_insert_node(b, &op->op, op->keys,
2216                                         op->journal_ref, op->replace_key);
2217         if (ret && !bch_keylist_empty(op->keys))
2218                 return ret;
2219         else
2220                 return MAP_DONE;
2221 }
2222
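     /*
      * Top level insert: map the keylist over the leaf nodes it covers,
      * restarting from the start of whatever is left whenever a pass returns
      * early. Returns -ESRCH on an insert collision (a replace that found
      * nothing to replace).
      */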
2223 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2224                      atomic_t *journal_ref, struct bkey *replace_key)
2225 {
2226         struct btree_insert_op op;
2227         int ret = 0;
2228
2229         BUG_ON(current->bio_list);
2230         BUG_ON(bch_keylist_empty(keys));
2231
2232         bch_btree_op_init(&op.op, 0);
2233         op.keys         = keys;
2234         op.journal_ref  = journal_ref;
2235         op.replace_key  = replace_key;
2236
2237         while (!ret && !bch_keylist_empty(keys)) {
2238                 op.op.lock = 0;
2239                 ret = bch_btree_map_leaf_nodes(&op.op, c,
2240                                                &START_KEY(keys->keys),
2241                                                btree_insert_fn);
2242         }
2243
2244         if (ret) {
2245                 struct bkey *k;
2246
2247                 pr_err("error %i", ret);
2248
2249                 while ((k = bch_keylist_pop(keys)))
2250                         bkey_put(c, k);
2251         } else if (op.op.insert_collision)
2252                 ret = -ESRCH;
2253
2254         return ret;
2255 }
2256
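     /*
      * Point the cache set at a new root node and write a journal entry so
      * the new root is persisted before the caller relies on it.
      */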
2257 void bch_btree_set_root(struct btree *b)
2258 {
2259         unsigned i;
2260         struct closure cl;
2261
2262         closure_init_stack(&cl);
2263
2264         trace_bcache_btree_set_root(b);
2265
2266         BUG_ON(!b->written);
2267
2268         for (i = 0; i < KEY_PTRS(&b->key); i++)
2269                 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2270
2271         mutex_lock(&b->c->bucket_lock);
2272         list_del_init(&b->list);
2273         mutex_unlock(&b->c->bucket_lock);
2274
2275         b->c->root = b;
2276
2277         bch_journal_meta(b->c, &cl);
2278         closure_sync(&cl);
2279 }
2280
2281 /* Map across nodes or keys */
2282
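     /*
      * Generic depth first traversals: __bch_btree_map_nodes() calls fn on
      * btree nodes (all of them or just leaves, depending on flags), while
      * bch_btree_map_keys() calls fn on every key in leaf nodes starting from
      * 'from'. Anything other than MAP_CONTINUE from fn stops the walk and is
      * propagated to the caller.
      */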
2283 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2284                                        struct bkey *from,
2285                                        btree_map_nodes_fn *fn, int flags)
2286 {
2287         int ret = MAP_CONTINUE;
2288
2289         if (b->level) {
2290                 struct bkey *k;
2291                 struct btree_iter iter;
2292
2293                 bch_btree_iter_init(&b->keys, &iter, from);
2294
2295                 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2296                                                        bch_ptr_bad))) {
2297                         ret = btree(map_nodes_recurse, k, b,
2298                                     op, from, fn, flags);
2299                         from = NULL;
2300
2301                         if (ret != MAP_CONTINUE)
2302                                 return ret;
2303                 }
2304         }
2305
2306         if (!b->level || flags == MAP_ALL_NODES)
2307                 ret = fn(op, b);
2308
2309         return ret;
2310 }
2311
2312 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2313                           struct bkey *from, btree_map_nodes_fn *fn, int flags)
2314 {
2315         return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2316 }
2317
2318 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2319                                       struct bkey *from, btree_map_keys_fn *fn,
2320                                       int flags)
2321 {
2322         int ret = MAP_CONTINUE;
2323         struct bkey *k;
2324         struct btree_iter iter;
2325
2326         bch_btree_iter_init(&b->keys, &iter, from);
2327
2328         while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2329                 ret = !b->level
2330                         ? fn(op, b, k)
2331                         : btree(map_keys_recurse, k, b, op, from, fn, flags);
2332                 from = NULL;
2333
2334                 if (ret != MAP_CONTINUE)
2335                         return ret;
2336         }
2337
2338         if (!b->level && (flags & MAP_END_KEY))
2339                 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2340                                      KEY_OFFSET(&b->key), 0));
2341
2342         return ret;
2343 }
2344
2345 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2346                        struct bkey *from, btree_map_keys_fn *fn, int flags)
2347 {
2348         return btree_root(map_keys_recurse, c, op, from, fn, flags);
2349 }
2350
2351 /* Keybuf code */
2352
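     /*
      * A keybuf is a sorted rbtree of keys of interest (e.g. dirty keys for
      * writeback), refilled by scanning the btree from last_scanned with a
      * predicate and drained by consumers via bch_keybuf_next().
      */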
2353 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2354 {
2355         /* Overlapping keys compare equal */
2356         if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2357                 return -1;
2358         if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2359                 return 1;
2360         return 0;
2361 }
2362
2363 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2364                                             struct keybuf_key *r)
2365 {
2366         return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2367 }
2368
2369 struct refill {
2370         struct btree_op op;
2371         unsigned        nr_found;
2372         struct keybuf   *buf;
2373         struct bkey     *end;
2374         keybuf_pred_fn  *pred;
2375 };
2376
2377 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2378                             struct bkey *k)
2379 {
2380         struct refill *refill = container_of(op, struct refill, op);
2381         struct keybuf *buf = refill->buf;
2382         int ret = MAP_CONTINUE;
2383
2384         if (bkey_cmp(k, refill->end) > 0) {
2385                 ret = MAP_DONE;
2386                 goto out;
2387         }
2388
2389         if (!KEY_SIZE(k)) /* end key */
2390                 goto out;
2391
2392         if (refill->pred(buf, k)) {
2393                 struct keybuf_key *w;
2394
2395                 spin_lock(&buf->lock);
2396
2397                 w = array_alloc(&buf->freelist);
2398                 if (!w) {
2399                         spin_unlock(&buf->lock);
2400                         return MAP_DONE;
2401                 }
2402
2403                 w->private = NULL;
2404                 bkey_copy(&w->key, k);
2405
2406                 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2407                         array_free(&buf->freelist, w);
2408                 else
2409                         refill->nr_found++;
2410
2411                 if (array_freelist_empty(&buf->freelist))
2412                         ret = MAP_DONE;
2413
2414                 spin_unlock(&buf->lock);
2415         }
2416 out:
2417         buf->last_scanned = *k;
2418         return ret;
2419 }
2420
2421 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2422                        struct bkey *end, keybuf_pred_fn *pred)
2423 {
2424         struct bkey start = buf->last_scanned;
2425         struct refill refill;
2426
2427         cond_resched();
2428
2429         bch_btree_op_init(&refill.op, -1);
2430         refill.nr_found = 0;
2431         refill.buf      = buf;
2432         refill.end      = end;
2433         refill.pred     = pred;
2434
2435         bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2436                            refill_keybuf_fn, MAP_END_KEY);
2437
2438         trace_bcache_keyscan(refill.nr_found,
2439                              KEY_INODE(&start), KEY_OFFSET(&start),
2440                              KEY_INODE(&buf->last_scanned),
2441                              KEY_OFFSET(&buf->last_scanned));
2442
2443         spin_lock(&buf->lock);
2444
2445         if (!RB_EMPTY_ROOT(&buf->keys)) {
2446                 struct keybuf_key *w;
2447                 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2448                 buf->start      = START_KEY(&w->key);
2449
2450                 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2451                 buf->end        = w->key;
2452         } else {
2453                 buf->start      = MAX_KEY;
2454                 buf->end        = MAX_KEY;
2455         }
2456
2457         spin_unlock(&buf->lock);
2458 }
2459
2460 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2461 {
2462         rb_erase(&w->node, &buf->keys);
2463         array_free(&buf->freelist, w);
2464 }
2465
2466 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2467 {
2468         spin_lock(&buf->lock);
2469         __bch_keybuf_del(buf, w);
2470         spin_unlock(&buf->lock);
2471 }
2472
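     /*
      * Called for writes that may overlap buffered keys: unclaimed overlapping
      * keys are dropped from the buffer, and we return true if any overlapping
      * key had already been claimed (w->private set) by a consumer.
      */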
2473 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2474                                   struct bkey *end)
2475 {
2476         bool ret = false;
2477         struct keybuf_key *p, *w, s;
2478         struct keybuf_key *p, *w, s;

2479         s.key = *start;
2480         if (bkey_cmp(end, &buf->start) <= 0 ||
2481             bkey_cmp(start, &buf->end) >= 0)
2482                 return false;
2483
2484         spin_lock(&buf->lock);
2485         w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2486
2487         while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2488                 p = w;
2489                 w = RB_NEXT(w, node);
2490
2491                 if (p->private)
2492                         ret = true;
2493                 else
2494                         __bch_keybuf_del(buf, p);
2495         }
2496
2497         spin_unlock(&buf->lock);
2498         return ret;
2499 }
2500
2501 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2502 {
2503         struct keybuf_key *w;

2504         spin_lock(&buf->lock);
2505
2506         w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2507
2508         while (w && w->private)
2509                 w = RB_NEXT(w, node);
2510
2511         if (w)
2512                 w->private = ERR_PTR(-EINTR);
2513
2514         spin_unlock(&buf->lock);
2515         return w;
2516 }
2517
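     /*
      * Like bch_keybuf_next(), but refills the buffer from the btree as needed
      * until a key is found or the scan reaches 'end'.
      */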
2518 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2519                                           struct keybuf *buf,
2520                                           struct bkey *end,
2521                                           keybuf_pred_fn *pred)
2522 {
2523         struct keybuf_key *ret;
2524
2525         while (1) {
2526                 ret = bch_keybuf_next(buf);
2527                 if (ret)
2528                         break;
2529
2530                 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2531                         pr_debug("scan finished");
2532                         break;
2533                 }
2534
2535                 bch_refill_keybuf(c, buf, end, pred);
2536         }
2537
2538         return ret;
2539 }
2540
2541 void bch_keybuf_init(struct keybuf *buf)
2542 {
2543         buf->last_scanned       = MAX_KEY;
2544         buf->keys               = RB_ROOT;
2545
2546         spin_lock_init(&buf->lock);
2547         array_allocator_init(&buf->freelist);
2548 }