GNU Linux-libre 4.4.284-gnu1
drivers/md/dm-era-target.c
#include "dm.h"
#include "persistent-data/dm-transaction-manager.h"
#include "persistent-data/dm-bitset.h"
#include "persistent-data/dm-space-map.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "era"

#define SUPERBLOCK_LOCATION 0
#define SUPERBLOCK_MAGIC 2126579579
#define SUPERBLOCK_CSUM_XOR 146538381
#define MIN_ERA_VERSION 1
#define MAX_ERA_VERSION 1
#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
#define MIN_BLOCK_SIZE 8

/*----------------------------------------------------------------
 * Writeset
 *--------------------------------------------------------------*/
struct writeset_metadata {
        uint32_t nr_bits;
        dm_block_t root;
};

struct writeset {
        struct writeset_metadata md;

        /*
         * An in-core copy of the bits, to avoid constantly looking them
         * up on disk.
         */
        unsigned long *bits;
};
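/*
 * Illustrative lifecycle (an editor's sketch, not a verbatim call
 * sequence from this driver): a writeset is allocated once, then
 * re-initialised at each era rollover:
 *
 *	struct writeset ws;
 *
 *	writeset_alloc(&ws, nr_blocks);		// in-core bits (vzalloc)
 *	writeset_init(&info, &ws, nr_blocks);	// plus a fresh on-disk bitset
 *	...mark blocks via writeset_test_and_set()...
 *	writeset_free(&ws);			// frees the in-core copy only
 */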

/*
 * This does not free the on-disk bitset, as that will normally be done
 * after digesting into the era array.
 */
static void writeset_free(struct writeset *ws)
{
        vfree(ws->bits);
        ws->bits = NULL;
}

static int setup_on_disk_bitset(struct dm_disk_bitset *info,
                                unsigned nr_bits, dm_block_t *root)
{
        int r;

        r = dm_bitset_empty(info, root);
        if (r)
                return r;

        return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
}

static size_t bitset_size(unsigned nr_bits)
{
        return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
}
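/*
 * e.g. with 8 byte longs, bitset_size(100) = 8 * dm_div_up(100, 64)
 * = 8 * 2 = 16 bytes.
 */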

/*
 * Allocates memory for the in core bitset.
 */
static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
{
        ws->bits = vzalloc(bitset_size(nr_blocks));
        if (!ws->bits) {
                DMERR("%s: couldn't allocate in memory bitset", __func__);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Wipes the in-core bitset, and creates a new on disk bitset.
 */
static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
                         dm_block_t nr_blocks)
{
        int r;

        memset(ws->bits, 0, bitset_size(nr_blocks));

        ws->md.nr_bits = nr_blocks;
        r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
        if (r) {
                DMERR("%s: setup_on_disk_bitset failed", __func__);
                return r;
        }

        return 0;
}

static bool writeset_marked(struct writeset *ws, dm_block_t block)
{
        return test_bit(block, ws->bits);
}

static int writeset_marked_on_disk(struct dm_disk_bitset *info,
                                   struct writeset_metadata *m, dm_block_t block,
                                   bool *result)
{
        dm_block_t old = m->root;

        /*
         * The bitset was flushed when it was archived, so we know there'll
         * be no change to the root.
         */
        int r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
        if (r) {
                DMERR("%s: dm_bitset_test_bit failed", __func__);
                return r;
        }

        BUG_ON(m->root != old);

        return r;
}

/*
 * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was.
 */
static int writeset_test_and_set(struct dm_disk_bitset *info,
                                 struct writeset *ws, uint32_t block)
{
        int r;

        if (!test_bit(block, ws->bits)) {
                r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
                if (r) {
                        /* FIXME: fail mode */
                        return r;
                }

                return 0;
        }

        return 1;
}
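/*
 * Typical caller pattern (a sketch; the real caller is
 * process_deferred_bios() below):
 *
 *	r = writeset_test_and_set(info, ws, block);
 *	if (r < 0)
 *		fail();			// metadata update failed
 *	else if (r == 0)
 *		commit_needed = true;	// bit was newly set on disk
 *	// r == 1: block already marked in this era, nothing to do
 */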

/*----------------------------------------------------------------
 * On disk metadata layout
 *--------------------------------------------------------------*/
#define SPACE_MAP_ROOT_SIZE 128
#define UUID_LEN 16

struct writeset_disk {
        __le32 nr_bits;
        __le64 root;
} __packed;

struct superblock_disk {
        __le32 csum;
        __le32 flags;
        __le64 blocknr;

        __u8 uuid[UUID_LEN];
        __le64 magic;
        __le32 version;

        __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

        __le32 data_block_size;
        __le32 metadata_block_size;
        __le32 nr_blocks;

        __le32 current_era;
        struct writeset_disk current_writeset;

        /*
         * Only these two fields are valid within the metadata snapshot.
         */
        __le64 writeset_tree_root;
        __le64 era_array_root;

        __le64 metadata_snap;
} __packed;

/*----------------------------------------------------------------
 * Superblock validation
 *--------------------------------------------------------------*/
static void sb_prepare_for_write(struct dm_block_validator *v,
                                 struct dm_block *b,
                                 size_t sb_block_size)
{
        struct superblock_disk *disk = dm_block_data(b);

        disk->blocknr = cpu_to_le64(dm_block_location(b));
        disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
                                                sb_block_size - sizeof(__le32),
                                                SUPERBLOCK_CSUM_XOR));
}

static int check_metadata_version(struct superblock_disk *disk)
{
        uint32_t metadata_version = le32_to_cpu(disk->version);
        if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
                DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
                      metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
                return -EINVAL;
        }

        return 0;
}

static int sb_check(struct dm_block_validator *v,
                    struct dm_block *b,
                    size_t sb_block_size)
{
        struct superblock_disk *disk = dm_block_data(b);
        __le32 csum_le;

        if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) {
                DMERR("sb_check failed: blocknr %llu: wanted %llu",
                      le64_to_cpu(disk->blocknr),
                      (unsigned long long)dm_block_location(b));
                return -ENOTBLK;
        }

        if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) {
                DMERR("sb_check failed: magic %llu: wanted %llu",
                      le64_to_cpu(disk->magic),
                      (unsigned long long) SUPERBLOCK_MAGIC);
                return -EILSEQ;
        }

        csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags,
                                             sb_block_size - sizeof(__le32),
                                             SUPERBLOCK_CSUM_XOR));
        if (csum_le != disk->csum) {
                DMERR("sb_check failed: csum %u: wanted %u",
                      le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
                return -EILSEQ;
        }

        return check_metadata_version(disk);
}

static struct dm_block_validator sb_validator = {
        .name = "superblock",
        .prepare_for_write = sb_prepare_for_write,
        .check = sb_check
};

/*----------------------------------------------------------------
 * Low level metadata handling
 *--------------------------------------------------------------*/
#define DM_ERA_METADATA_BLOCK_SIZE 4096
#define DM_ERA_METADATA_CACHE_SIZE 64
#define ERA_MAX_CONCURRENT_LOCKS 5

struct era_metadata {
        struct block_device *bdev;
        struct dm_block_manager *bm;
        struct dm_space_map *sm;
        struct dm_transaction_manager *tm;

        dm_block_t block_size;
        uint32_t nr_blocks;

        uint32_t current_era;

        /*
         * We preallocate 2 writesets.  When an era rolls over we
         * switch between them. This means the allocation is done at
         * preresume time, rather than on the io path.
         */
        struct writeset writesets[2];
        struct writeset *current_writeset;

        dm_block_t writeset_tree_root;
        dm_block_t era_array_root;

        struct dm_disk_bitset bitset_info;
        struct dm_btree_info writeset_tree_info;
        struct dm_array_info era_array_info;

        dm_block_t metadata_snap;

        /*
         * A flag that is set whenever a writeset has been archived.
         */
        bool archived_writesets;

        /*
         * Reading the space map root can fail, so we read it into this
         * buffer before the superblock is locked and updated.
         */
        __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};

static int superblock_read_lock(struct era_metadata *md,
                                struct dm_block **sblock)
{
        return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,
                               &sb_validator, sblock);
}

static int superblock_lock_zero(struct era_metadata *md,
                                struct dm_block **sblock)
{
        return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,
                                     &sb_validator, sblock);
}

static int superblock_lock(struct era_metadata *md,
                           struct dm_block **sblock)
{
        return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,
                                &sb_validator, sblock);
}

/* FIXME: duplication with cache and thin */
static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
        int r;
        unsigned i;
        struct dm_block *b;
        __le64 *data_le, zero = cpu_to_le64(0);
        unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);

        /*
         * We can't use a validator here - it may be all zeroes.
         */
        r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b);
        if (r)
                return r;

        data_le = dm_block_data(b);
        *result = true;
        for (i = 0; i < sb_block_size; i++) {
                if (data_le[i] != zero) {
                        *result = false;
                        break;
                }
        }

        dm_bm_unlock(b);

        return 0;
}

/*----------------------------------------------------------------*/

static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk)
{
        disk->nr_bits = cpu_to_le32(core->nr_bits);
        disk->root = cpu_to_le64(core->root);
}

static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core)
{
        core->nr_bits = le32_to_cpu(disk->nr_bits);
        core->root = le64_to_cpu(disk->root);
}

static void ws_inc(void *context, const void *value)
{
        struct era_metadata *md = context;
        struct writeset_disk ws_d;
        dm_block_t b;

        memcpy(&ws_d, value, sizeof(ws_d));
        b = le64_to_cpu(ws_d.root);

        dm_tm_inc(md->tm, b);
}

static void ws_dec(void *context, const void *value)
{
        struct era_metadata *md = context;
        struct writeset_disk ws_d;
        dm_block_t b;

        memcpy(&ws_d, value, sizeof(ws_d));
        b = le64_to_cpu(ws_d.root);

        dm_bitset_del(&md->bitset_info, b);
}

static int ws_eq(void *context, const void *value1, const void *value2)
{
        return !memcmp(value1, value2, sizeof(struct writeset_disk));
}

/*----------------------------------------------------------------*/

static void setup_writeset_tree_info(struct era_metadata *md)
{
        struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;
        md->writeset_tree_info.tm = md->tm;
        md->writeset_tree_info.levels = 1;
        vt->context = md;
        vt->size = sizeof(struct writeset_disk);
        vt->inc = ws_inc;
        vt->dec = ws_dec;
        vt->equal = ws_eq;
}

static void setup_era_array_info(struct era_metadata *md)
{
        struct dm_btree_value_type vt;
        vt.context = NULL;
        vt.size = sizeof(__le32);
        vt.inc = NULL;
        vt.dec = NULL;
        vt.equal = NULL;

        dm_array_info_init(&md->era_array_info, md->tm, &vt);
}

static void setup_infos(struct era_metadata *md)
{
        dm_disk_bitset_init(md->tm, &md->bitset_info);
        setup_writeset_tree_info(md);
        setup_era_array_info(md);
}

/*----------------------------------------------------------------*/

static int create_fresh_metadata(struct era_metadata *md)
{
        int r;

        r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION,
                                 &md->tm, &md->sm);
        if (r < 0) {
                DMERR("dm_tm_create_with_sm failed");
                return r;
        }

        setup_infos(md);

        r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root);
        if (r) {
                DMERR("couldn't create new writeset tree");
                goto bad;
        }

        r = dm_array_empty(&md->era_array_info, &md->era_array_root);
        if (r) {
                DMERR("couldn't create era array");
                goto bad;
        }

        return 0;

bad:
        dm_sm_destroy(md->sm);
        dm_tm_destroy(md->tm);

        return r;
}

static int save_sm_root(struct era_metadata *md)
{
        int r;
        size_t metadata_len;

        r = dm_sm_root_size(md->sm, &metadata_len);
        if (r < 0)
                return r;

        return dm_sm_copy_root(md->sm, &md->metadata_space_map_root,
                               metadata_len);
}

static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk)
{
        memcpy(&disk->metadata_space_map_root,
               &md->metadata_space_map_root,
               sizeof(md->metadata_space_map_root));
}

/*
 * Writes a superblock, including the static fields that don't get updated
 * with every commit (possible optimisation here).  'md' should be fully
 * constructed when this is called.
 */
static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk)
{
        disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC);
        disk->flags = cpu_to_le32(0ul);

        /* FIXME: can't keep blanking the uuid (uuid is currently unused though) */
        memset(disk->uuid, 0, sizeof(disk->uuid));
        disk->version = cpu_to_le32(MAX_ERA_VERSION);

        copy_sm_root(md, disk);

        disk->data_block_size = cpu_to_le32(md->block_size);
        disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
        disk->nr_blocks = cpu_to_le32(md->nr_blocks);
        disk->current_era = cpu_to_le32(md->current_era);

        ws_pack(&md->current_writeset->md, &disk->current_writeset);
        disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root);
        disk->era_array_root = cpu_to_le64(md->era_array_root);
        disk->metadata_snap = cpu_to_le64(md->metadata_snap);
}

static int write_superblock(struct era_metadata *md)
{
        int r;
        struct dm_block *sblock;
        struct superblock_disk *disk;

        r = save_sm_root(md);
        if (r) {
                DMERR("%s: save_sm_root failed", __func__);
                return r;
        }

        r = superblock_lock_zero(md, &sblock);
        if (r)
                return r;

        disk = dm_block_data(sblock);
        prepare_superblock(md, disk);

        return dm_tm_commit(md->tm, sblock);
}

/*
 * Assumes block_size and the infos are set.
 */
static int format_metadata(struct era_metadata *md)
{
        int r;

        r = create_fresh_metadata(md);
        if (r)
                return r;

        r = write_superblock(md);
        if (r) {
                dm_sm_destroy(md->sm);
                dm_tm_destroy(md->tm);
                return r;
        }

        return 0;
}

static int open_metadata(struct era_metadata *md)
{
        int r;
        struct dm_block *sblock;
        struct superblock_disk *disk;

        r = superblock_read_lock(md, &sblock);
        if (r) {
                DMERR("couldn't read_lock superblock");
                return r;
        }

        disk = dm_block_data(sblock);

        /* Verify the data block size hasn't changed */
        if (le32_to_cpu(disk->data_block_size) != md->block_size) {
                DMERR("changing the data block size (from %u to %llu) is not supported",
                      le32_to_cpu(disk->data_block_size), md->block_size);
                r = -EINVAL;
                goto bad;
        }

        r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
                               disk->metadata_space_map_root,
                               sizeof(disk->metadata_space_map_root),
                               &md->tm, &md->sm);
        if (r) {
                DMERR("dm_tm_open_with_sm failed");
                goto bad;
        }

        setup_infos(md);

        md->nr_blocks = le32_to_cpu(disk->nr_blocks);
        md->current_era = le32_to_cpu(disk->current_era);

        ws_unpack(&disk->current_writeset, &md->current_writeset->md);
        md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
        md->era_array_root = le64_to_cpu(disk->era_array_root);
        md->metadata_snap = le64_to_cpu(disk->metadata_snap);
        md->archived_writesets = true;

        dm_bm_unlock(sblock);

        return 0;

bad:
        dm_bm_unlock(sblock);
        return r;
}

static int open_or_format_metadata(struct era_metadata *md,
                                   bool may_format)
{
        int r;
        bool unformatted = false;

        r = superblock_all_zeroes(md->bm, &unformatted);
        if (r)
                return r;

        if (unformatted)
                return may_format ? format_metadata(md) : -EPERM;

        return open_metadata(md);
}

static int create_persistent_data_objects(struct era_metadata *md,
                                          bool may_format)
{
        int r;

        md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
                                         DM_ERA_METADATA_CACHE_SIZE,
                                         ERA_MAX_CONCURRENT_LOCKS);
        if (IS_ERR(md->bm)) {
                DMERR("could not create block manager");
                return PTR_ERR(md->bm);
        }

        r = open_or_format_metadata(md, may_format);
        if (r)
                dm_block_manager_destroy(md->bm);

        return r;
}

static void destroy_persistent_data_objects(struct era_metadata *md)
{
        dm_sm_destroy(md->sm);
        dm_tm_destroy(md->tm);
        dm_block_manager_destroy(md->bm);
}

/*
 * This waits until all era_map threads have picked up the new filter.
 */
static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset)
{
        rcu_assign_pointer(md->current_writeset, new_writeset);
        synchronize_rcu();
}
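/*
 * Readers pair with the above via RCU (see metadata_current_marked()
 * below):
 *
 *	rcu_read_lock();
 *	ws = rcu_dereference(md->current_writeset);
 *	...query ws->bits...
 *	rcu_read_unlock();
 */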

/*----------------------------------------------------------------
 * Writesets get 'digested' into the main era array.
 *
 * We're using a coroutine here so the worker thread can do the digestion,
 * thus avoiding synchronisation of the metadata.  Digesting a whole
 * writeset in one go would cause too much latency.
 *--------------------------------------------------------------*/
struct digest {
        uint32_t era;
        unsigned nr_bits, current_bit;
        struct writeset_metadata writeset;
        __le32 value;
        struct dm_disk_bitset info;

        int (*step)(struct era_metadata *, struct digest *);
};
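/*
 * The 'step' pointer implements the coroutine: each invocation does a
 * bounded amount of work and then re-points 'step' at the next stage
 * (lookup -> transcribe -> remove -> lookup ...), or clears it when the
 * writeset tree is empty.  A sketch of the driving loop (the real
 * driver is process_old_eras() further down):
 *
 *	if (d->step && d->step(md, d) < 0)
 *		d->step = NULL;
 */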

static int metadata_digest_lookup_writeset(struct era_metadata *md,
                                           struct digest *d);

static int metadata_digest_remove_writeset(struct era_metadata *md,
                                           struct digest *d)
{
        int r;
        uint64_t key = d->era;

        r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root,
                            &key, &md->writeset_tree_root);
        if (r) {
                DMERR("%s: dm_btree_remove failed", __func__);
                return r;
        }

        d->step = metadata_digest_lookup_writeset;
        return 0;
}

#define INSERTS_PER_STEP 100

static int metadata_digest_transcribe_writeset(struct era_metadata *md,
                                               struct digest *d)
{
        int r;
        bool marked;
        unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);

        for (b = d->current_bit; b < e; b++) {
                r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
                if (r) {
                        DMERR("%s: writeset_marked_on_disk failed", __func__);
                        return r;
                }

                if (!marked)
                        continue;

                __dm_bless_for_disk(&d->value);
                r = dm_array_set_value(&md->era_array_info, md->era_array_root,
                                       b, &d->value, &md->era_array_root);
                if (r) {
                        DMERR("%s: dm_array_set_value failed", __func__);
                        return r;
                }
        }

        if (b == d->nr_bits)
                d->step = metadata_digest_remove_writeset;
        else
                d->current_bit = b;

        return 0;
}

static int metadata_digest_lookup_writeset(struct era_metadata *md,
                                           struct digest *d)
{
        int r;
        uint64_t key;
        struct writeset_disk disk;

        r = dm_btree_find_lowest_key(&md->writeset_tree_info,
                                     md->writeset_tree_root, &key);
        if (r < 0)
                return r;

        d->era = key;

        r = dm_btree_lookup(&md->writeset_tree_info,
                            md->writeset_tree_root, &key, &disk);
        if (r) {
                if (r == -ENODATA) {
                        d->step = NULL;
                        return 0;
                }

                DMERR("%s: dm_btree_lookup failed", __func__);
                return r;
        }

        ws_unpack(&disk, &d->writeset);
        d->value = cpu_to_le32(key);

        /*
         * We initialise another bitset info to avoid any caching side effects
         * with the previous one.
         */
        dm_disk_bitset_init(md->tm, &d->info);

        d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
        d->current_bit = 0;
        d->step = metadata_digest_transcribe_writeset;

        return 0;
}

static int metadata_digest_start(struct era_metadata *md, struct digest *d)
{
        if (d->step)
                return 0;

        memset(d, 0, sizeof(*d));
        d->step = metadata_digest_lookup_writeset;

        return 0;
}

/*----------------------------------------------------------------
 * High level metadata interface.  Target methods should use these, and not
 * the lower level ones.
 *--------------------------------------------------------------*/
static struct era_metadata *metadata_open(struct block_device *bdev,
                                          sector_t block_size,
                                          bool may_format)
{
        int r;
        struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL);

        if (!md)
                return NULL;

        md->bdev = bdev;
        md->block_size = block_size;

        md->writesets[0].md.root = INVALID_WRITESET_ROOT;
        md->writesets[1].md.root = INVALID_WRITESET_ROOT;
        md->current_writeset = &md->writesets[0];

        r = create_persistent_data_objects(md, may_format);
        if (r) {
                kfree(md);
                return ERR_PTR(r);
        }

        return md;
}

static void metadata_close(struct era_metadata *md)
{
        writeset_free(&md->writesets[0]);
        writeset_free(&md->writesets[1]);
        destroy_persistent_data_objects(md);
        kfree(md);
}

static bool valid_nr_blocks(dm_block_t n)
{
        /*
         * dm_bitset restricts us to 2^32.  test_bit & co. restrict us
         * further to 2^31 - 1
         */
        return n < (1ull << 31);
}

static int metadata_resize(struct era_metadata *md, void *arg)
{
        int r;
        dm_block_t *new_size = arg;
        __le32 value;

        if (!valid_nr_blocks(*new_size)) {
                DMERR("Invalid number of origin blocks %llu",
                      (unsigned long long) *new_size);
                return -EINVAL;
        }

        writeset_free(&md->writesets[0]);
        writeset_free(&md->writesets[1]);

        r = writeset_alloc(&md->writesets[0], *new_size);
        if (r) {
                DMERR("%s: writeset_alloc failed for writeset 0", __func__);
                return r;
        }

        r = writeset_alloc(&md->writesets[1], *new_size);
        if (r) {
                DMERR("%s: writeset_alloc failed for writeset 1", __func__);
                writeset_free(&md->writesets[0]);
                return r;
        }

        value = cpu_to_le32(0u);
        __dm_bless_for_disk(&value);
        r = dm_array_resize(&md->era_array_info, md->era_array_root,
                            md->nr_blocks, *new_size,
                            &value, &md->era_array_root);
        if (r) {
                DMERR("%s: dm_array_resize failed", __func__);
                writeset_free(&md->writesets[0]);
                writeset_free(&md->writesets[1]);
                return r;
        }

        md->nr_blocks = *new_size;
        return 0;
}

static int metadata_era_archive(struct era_metadata *md)
{
        int r;
        uint64_t keys[1];
        struct writeset_disk value;

        r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
                            &md->current_writeset->md.root);
        if (r) {
                DMERR("%s: dm_bitset_flush failed", __func__);
                return r;
        }

        ws_pack(&md->current_writeset->md, &value);

        keys[0] = md->current_era;
        __dm_bless_for_disk(&value);
        r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root,
                            keys, &value, &md->writeset_tree_root);
        if (r) {
                DMERR("%s: couldn't insert writeset into btree", __func__);
                /* FIXME: fail mode */
                return r;
        }

        md->current_writeset->md.root = INVALID_WRITESET_ROOT;
        md->archived_writesets = true;

        return 0;
}

static struct writeset *next_writeset(struct era_metadata *md)
{
        return (md->current_writeset == &md->writesets[0]) ?
                &md->writesets[1] : &md->writesets[0];
}

static int metadata_new_era(struct era_metadata *md)
{
        int r;
        struct writeset *new_writeset = next_writeset(md);

        r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
        if (r) {
                DMERR("%s: writeset_init failed", __func__);
                return r;
        }

        swap_writeset(md, new_writeset);
        md->current_era++;

        return 0;
}

static int metadata_era_rollover(struct era_metadata *md)
{
        int r;

        if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
                r = metadata_era_archive(md);
                if (r) {
                        DMERR("%s: metadata_era_archive failed", __func__);
                        /* FIXME: fail mode? */
                        return r;
                }
        }

        r = metadata_new_era(md);
        if (r) {
                DMERR("%s: new era failed", __func__);
                /* FIXME: fail mode */
                return r;
        }

        return 0;
}

static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
{
        bool r;
        struct writeset *ws;

        rcu_read_lock();
        ws = rcu_dereference(md->current_writeset);
        r = writeset_marked(ws, block);
        rcu_read_unlock();

        return r;
}

static int metadata_commit(struct era_metadata *md)
{
        int r;
        struct dm_block *sblock;

        if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
                r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
                                    &md->current_writeset->md.root);
                if (r) {
                        DMERR("%s: bitset flush failed", __func__);
                        return r;
                }
        }

        r = dm_tm_pre_commit(md->tm);
        if (r) {
                DMERR("%s: pre commit failed", __func__);
                return r;
        }

        r = save_sm_root(md);
        if (r) {
                DMERR("%s: save_sm_root failed", __func__);
                return r;
        }

        r = superblock_lock(md, &sblock);
        if (r) {
                DMERR("%s: superblock lock failed", __func__);
                return r;
        }

        prepare_superblock(md, dm_block_data(sblock));

        return dm_tm_commit(md->tm, sblock);
}

static int metadata_checkpoint(struct era_metadata *md)
{
        /*
         * For now we just rollover, but later I want to put a check in to
         * avoid this if the filter is still pretty fresh.
         */
        return metadata_era_rollover(md);
}

/*
 * Metadata snapshots allow userland to access era data.
 */
static int metadata_take_snap(struct era_metadata *md)
{
        int r, inc;
        struct dm_block *clone;

        if (md->metadata_snap != SUPERBLOCK_LOCATION) {
                DMERR("%s: metadata snapshot already exists", __func__);
                return -EINVAL;
        }

        r = metadata_era_rollover(md);
        if (r) {
                DMERR("%s: era rollover failed", __func__);
                return r;
        }

        r = metadata_commit(md);
        if (r) {
                DMERR("%s: metadata_commit failed", __func__);
                return r;
        }

        r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION);
        if (r) {
                DMERR("%s: couldn't increment superblock", __func__);
                return r;
        }

        r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION,
                               &sb_validator, &clone, &inc);
        if (r) {
                DMERR("%s: couldn't shadow superblock", __func__);
                dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION);
                return r;
        }
        BUG_ON(!inc);

        r = dm_sm_inc_block(md->sm, md->writeset_tree_root);
        if (r) {
                DMERR("%s: couldn't inc writeset tree root", __func__);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        r = dm_sm_inc_block(md->sm, md->era_array_root);
        if (r) {
                DMERR("%s: couldn't inc era array root", __func__);
                dm_sm_dec_block(md->sm, md->writeset_tree_root);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        md->metadata_snap = dm_block_location(clone);

        dm_tm_unlock(md->tm, clone);

        return 0;
}

static int metadata_drop_snap(struct era_metadata *md)
{
        int r;
        dm_block_t location;
        struct dm_block *clone;
        struct superblock_disk *disk;

        if (md->metadata_snap == SUPERBLOCK_LOCATION) {
                DMERR("%s: no snap to drop", __func__);
                return -EINVAL;
        }

        r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
        if (r) {
                DMERR("%s: couldn't read lock superblock clone", __func__);
                return r;
        }

        /*
         * Whatever happens now we'll commit with no record of the metadata
         * snap.
         */
        md->metadata_snap = SUPERBLOCK_LOCATION;

        disk = dm_block_data(clone);
        r = dm_btree_del(&md->writeset_tree_info,
                         le64_to_cpu(disk->writeset_tree_root));
        if (r) {
                DMERR("%s: error deleting writeset tree clone", __func__);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root));
        if (r) {
                DMERR("%s: error deleting era array clone", __func__);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        location = dm_block_location(clone);
        dm_tm_unlock(md->tm, clone);

        return dm_sm_dec_block(md->sm, location);
}

struct metadata_stats {
        dm_block_t used;
        dm_block_t total;
        dm_block_t snap;
        uint32_t era;
};

static int metadata_get_stats(struct era_metadata *md, void *ptr)
{
        int r;
        struct metadata_stats *s = ptr;
        dm_block_t nr_free, nr_total;

        r = dm_sm_get_nr_free(md->sm, &nr_free);
        if (r) {
                DMERR("dm_sm_get_nr_free returned %d", r);
                return r;
        }

        r = dm_sm_get_nr_blocks(md->sm, &nr_total);
        if (r) {
                DMERR("dm_sm_get_nr_blocks returned %d", r);
                return r;
        }

        s->used = nr_total - nr_free;
        s->total = nr_total;
        s->snap = md->metadata_snap;
        s->era = md->current_era;

        return 0;
}

/*----------------------------------------------------------------*/

struct era {
        struct dm_target *ti;
        struct dm_target_callbacks callbacks;

        struct dm_dev *metadata_dev;
        struct dm_dev *origin_dev;

        dm_block_t nr_blocks;
        uint32_t sectors_per_block;
        int sectors_per_block_shift;
        struct era_metadata *md;

        struct workqueue_struct *wq;
        struct work_struct worker;

        spinlock_t deferred_lock;
        struct bio_list deferred_bios;

        spinlock_t rpc_lock;
        struct list_head rpc_calls;

        struct digest digest;
        atomic_t suspended;
};

struct rpc {
        struct list_head list;

        int (*fn0)(struct era_metadata *);
        int (*fn1)(struct era_metadata *, void *);
        void *arg;
        int result;

        struct completion complete;
};
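/*
 * An rpc carries either a zero-argument metadata function (fn0) or a
 * one-argument one (fn1 + arg).  perform_rpc() below queues it on
 * era->rpc_calls, wakes the worker and sleeps on 'complete'; the worker
 * runs the function against era->md, commits, and completes it.
 */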

/*----------------------------------------------------------------
 * Remapping.
 *---------------------------------------------------------------*/
static bool block_size_is_power_of_two(struct era *era)
{
        return era->sectors_per_block_shift >= 0;
}

static dm_block_t get_block(struct era *era, struct bio *bio)
{
        sector_t block_nr = bio->bi_iter.bi_sector;

        if (!block_size_is_power_of_two(era))
                (void) sector_div(block_nr, era->sectors_per_block);
        else
                block_nr >>= era->sectors_per_block_shift;

        return block_nr;
}

static void remap_to_origin(struct era *era, struct bio *bio)
{
        bio->bi_bdev = era->origin_dev->bdev;
}

/*----------------------------------------------------------------
 * Worker thread
 *--------------------------------------------------------------*/
static void wake_worker(struct era *era)
{
        if (!atomic_read(&era->suspended))
                queue_work(era->wq, &era->worker);
}

static void process_old_eras(struct era *era)
{
        int r;

        if (!era->digest.step)
                return;

        r = era->digest.step(era->md, &era->digest);
        if (r < 0) {
                DMERR("%s: digest step failed, stopping digestion", __func__);
                era->digest.step = NULL;

        } else if (era->digest.step)
                wake_worker(era);
}

static void process_deferred_bios(struct era *era)
{
        int r;
        struct bio_list deferred_bios, marked_bios;
        struct bio *bio;
        struct blk_plug plug;
        bool commit_needed = false;
        bool failed = false;
        struct writeset *ws = era->md->current_writeset;

        bio_list_init(&deferred_bios);
        bio_list_init(&marked_bios);

        spin_lock(&era->deferred_lock);
        bio_list_merge(&deferred_bios, &era->deferred_bios);
        bio_list_init(&era->deferred_bios);
        spin_unlock(&era->deferred_lock);

        if (bio_list_empty(&deferred_bios))
                return;

        while ((bio = bio_list_pop(&deferred_bios))) {
                r = writeset_test_and_set(&era->md->bitset_info, ws,
                                          get_block(era, bio));
                if (r < 0) {
                        /*
                         * This is bad news, we need to rollback.
                         * FIXME: finish.
                         */
                        failed = true;
                } else if (r == 0)
                        commit_needed = true;

                bio_list_add(&marked_bios, bio);
        }

        if (commit_needed) {
                r = metadata_commit(era->md);
                if (r)
                        failed = true;
        }

        if (failed)
                while ((bio = bio_list_pop(&marked_bios)))
                        bio_io_error(bio);
        else {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&marked_bios))) {
                        /*
                         * Only update the in-core writeset if the on-disk one
                         * was updated too.
                         */
                        if (commit_needed)
                                set_bit(get_block(era, bio), ws->bits);
                        generic_make_request(bio);
                }
                blk_finish_plug(&plug);
        }
}

static void process_rpc_calls(struct era *era)
{
        int r;
        bool need_commit = false;
        struct list_head calls;
        struct rpc *rpc, *tmp;

        INIT_LIST_HEAD(&calls);
        spin_lock(&era->rpc_lock);
        list_splice_init(&era->rpc_calls, &calls);
        spin_unlock(&era->rpc_lock);

        list_for_each_entry_safe(rpc, tmp, &calls, list) {
                rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
                need_commit = true;
        }

        if (need_commit) {
                r = metadata_commit(era->md);
                if (r)
                        list_for_each_entry_safe(rpc, tmp, &calls, list)
                                rpc->result = r;
        }

        list_for_each_entry_safe(rpc, tmp, &calls, list)
                complete(&rpc->complete);
}

static void kick_off_digest(struct era *era)
{
        if (era->md->archived_writesets) {
                era->md->archived_writesets = false;
                metadata_digest_start(era->md, &era->digest);
        }
}

static void do_work(struct work_struct *ws)
{
        struct era *era = container_of(ws, struct era, worker);

        kick_off_digest(era);
        process_old_eras(era);
        process_deferred_bios(era);
        process_rpc_calls(era);
}

static void defer_bio(struct era *era, struct bio *bio)
{
        spin_lock(&era->deferred_lock);
        bio_list_add(&era->deferred_bios, bio);
        spin_unlock(&era->deferred_lock);

        wake_worker(era);
}

/*
 * Make an rpc call to the worker to change the metadata.
 */
static int perform_rpc(struct era *era, struct rpc *rpc)
{
        rpc->result = 0;
        init_completion(&rpc->complete);

        spin_lock(&era->rpc_lock);
        list_add(&rpc->list, &era->rpc_calls);
        spin_unlock(&era->rpc_lock);

        wake_worker(era);
        wait_for_completion(&rpc->complete);

        return rpc->result;
}

static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
{
        struct rpc rpc;
        rpc.fn0 = fn;
        rpc.fn1 = NULL;

        return perform_rpc(era, &rpc);
}

static int in_worker1(struct era *era,
                      int (*fn)(struct era_metadata *, void *), void *arg)
{
        struct rpc rpc;
        rpc.fn0 = NULL;
        rpc.fn1 = fn;
        rpc.arg = arg;

        return perform_rpc(era, &rpc);
}

static void start_worker(struct era *era)
{
        atomic_set(&era->suspended, 0);
}

static void stop_worker(struct era *era)
{
        atomic_set(&era->suspended, 1);
        flush_workqueue(era->wq);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/
static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);
        return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
        struct era *era = container_of(cb, struct era, callbacks);
        return dev_is_congested(era->origin_dev, bdi_bits);
}

static void era_destroy(struct era *era)
{
        if (era->md)
                metadata_close(era->md);

        if (era->wq)
                destroy_workqueue(era->wq);

        if (era->origin_dev)
                dm_put_device(era->ti, era->origin_dev);

        if (era->metadata_dev)
                dm_put_device(era->ti, era->metadata_dev);

        kfree(era);
}

static dm_block_t calc_nr_blocks(struct era *era)
{
        return dm_sector_div_up(era->ti->len, era->sectors_per_block);
}

static bool valid_block_size(dm_block_t block_size)
{
        bool greater_than_zero = block_size > 0;
        bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0;

        return greater_than_zero && multiple_of_min_block_size;
}

/*
 * <metadata dev> <data dev> <data block size (sectors)>
 */
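/*
 * Example table line (hypothetical device names; 128 sectors = 64KiB
 * blocks):
 *
 *	dmsetup create my_era \
 *		--table "0 409600 era /dev/mapper/meta /dev/mapper/origin 128"
 */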
static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        int r;
        char dummy;
        struct era *era;
        struct era_metadata *md;

        if (argc != 3) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        era = kzalloc(sizeof(*era), GFP_KERNEL);
        if (!era) {
                ti->error = "Error allocating era structure";
                return -ENOMEM;
        }

        era->ti = ti;

        r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
        if (r) {
                ti->error = "Error opening metadata device";
                era_destroy(era);
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
        if (r) {
                ti->error = "Error opening data device";
                era_destroy(era);
                return -EINVAL;
        }

        r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
        if (r != 1) {
                ti->error = "Error parsing block size";
                era_destroy(era);
                return -EINVAL;
        }

        r = dm_set_target_max_io_len(ti, era->sectors_per_block);
        if (r) {
                ti->error = "could not set max io len";
                era_destroy(era);
                return -EINVAL;
        }

        if (!valid_block_size(era->sectors_per_block)) {
                ti->error = "Invalid block size";
                era_destroy(era);
                return -EINVAL;
        }

        if (era->sectors_per_block & (era->sectors_per_block - 1))
                era->sectors_per_block_shift = -1;
        else
                era->sectors_per_block_shift = __ffs(era->sectors_per_block);

        md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
        if (IS_ERR(md)) {
                ti->error = "Error reading metadata";
                era_destroy(era);
                return PTR_ERR(md);
        }
        era->md = md;

        era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
        if (!era->wq) {
                ti->error = "could not create workqueue for metadata object";
                era_destroy(era);
                return -ENOMEM;
        }
        INIT_WORK(&era->worker, do_work);

        spin_lock_init(&era->deferred_lock);
        bio_list_init(&era->deferred_bios);

        spin_lock_init(&era->rpc_lock);
        INIT_LIST_HEAD(&era->rpc_calls);

        ti->private = era;
        ti->num_flush_bios = 1;
        ti->flush_supported = true;

        ti->num_discard_bios = 1;
        ti->discards_supported = true;
        era->callbacks.congested_fn = era_is_congested;
        dm_table_add_target_callbacks(ti->table, &era->callbacks);

        return 0;
}

static void era_dtr(struct dm_target *ti)
{
        era_destroy(ti->private);
}

static int era_map(struct dm_target *ti, struct bio *bio)
{
        struct era *era = ti->private;
        dm_block_t block = get_block(era, bio);

        /*
         * All bios get remapped to the origin device.  We do this now, but
         * it may not get issued until later, depending on whether the
         * block is marked in this era.
         */
        remap_to_origin(era, bio);

        /*
         * REQ_FLUSH bios carry no data, so we're not interested in them.
         */
        if (!(bio->bi_rw & REQ_FLUSH) &&
            (bio_data_dir(bio) == WRITE) &&
            !metadata_current_marked(era->md, block)) {
                defer_bio(era, bio);
                return DM_MAPIO_SUBMITTED;
        }

        return DM_MAPIO_REMAPPED;
}

static void era_postsuspend(struct dm_target *ti)
{
        int r;
        struct era *era = ti->private;

        r = in_worker0(era, metadata_era_archive);
        if (r) {
                DMERR("%s: couldn't archive current era", __func__);
                /* FIXME: fail mode */
        }

        stop_worker(era);
}

static int era_preresume(struct dm_target *ti)
{
        int r;
        struct era *era = ti->private;
        dm_block_t new_size = calc_nr_blocks(era);

        if (era->nr_blocks != new_size) {
                r = metadata_resize(era->md, &new_size);
                if (r) {
                        DMERR("%s: metadata_resize failed", __func__);
                        return r;
                }

                r = metadata_commit(era->md);
                if (r) {
                        DMERR("%s: metadata_commit failed", __func__);
                        return r;
                }

                era->nr_blocks = new_size;
        }

        start_worker(era);

        r = in_worker0(era, metadata_era_rollover);
        if (r) {
                DMERR("%s: metadata_era_rollover failed", __func__);
                return r;
        }

        return 0;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <current era> <held metadata root | '-'>
 */
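/*
 * e.g. an INFO line of "8 24/1024 3 -" (numbers illustrative) would
 * mean: 8 sector (4KiB) metadata blocks, 24 of 1024 metadata blocks
 * used, current era 3, and no metadata snapshot held.
 */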
static void era_status(struct dm_target *ti, status_type_t type,
                       unsigned status_flags, char *result, unsigned maxlen)
{
        int r;
        struct era *era = ti->private;
        ssize_t sz = 0;
        struct metadata_stats stats;
        char buf[BDEVNAME_SIZE];

        switch (type) {
        case STATUSTYPE_INFO:
                r = in_worker1(era, metadata_get_stats, &stats);
                if (r)
                        goto err;

                DMEMIT("%u %llu/%llu %u",
                       (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
                       (unsigned long long) stats.used,
                       (unsigned long long) stats.total,
                       (unsigned) stats.era);

                if (stats.snap != SUPERBLOCK_LOCATION)
                        DMEMIT(" %llu", stats.snap);
                else
                        DMEMIT(" -");
                break;

        case STATUSTYPE_TABLE:
                format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
                DMEMIT("%s ", buf);
                format_dev_t(buf, era->origin_dev->bdev->bd_dev);
                DMEMIT("%s %u", buf, era->sectors_per_block);
                break;
        }

        return;

err:
        DMEMIT("Error");
}

static int era_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct era *era = ti->private;

        if (argc != 1) {
                DMERR("incorrect number of message arguments");
                return -EINVAL;
        }

        if (!strcasecmp(argv[0], "checkpoint"))
                return in_worker0(era, metadata_checkpoint);

        if (!strcasecmp(argv[0], "take_metadata_snap"))
                return in_worker0(era, metadata_take_snap);

        if (!strcasecmp(argv[0], "drop_metadata_snap"))
                return in_worker0(era, metadata_drop_snap);

        DMERR("unsupported message '%s'", argv[0]);
        return -EINVAL;
}
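/*
 * e.g. (hypothetical device name):
 *
 *	dmsetup message my_era 0 checkpoint
 *	dmsetup message my_era 0 take_metadata_snap
 *	dmsetup message my_era 0 drop_metadata_snap
 */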

static sector_t get_dev_size(struct dm_dev *dev)
{
        return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

static int era_iterate_devices(struct dm_target *ti,
                               iterate_devices_callout_fn fn, void *data)
{
        struct era *era = ti->private;
        return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}

static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct era *era = ti->private;
        uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

        /*
         * If the system-determined stacked limits are compatible with the
         * era device's blocksize (io_opt is a factor) do not override them.
         */
        if (io_opt_sectors < era->sectors_per_block ||
            do_div(io_opt_sectors, era->sectors_per_block)) {
                blk_limits_io_min(limits, 0);
                blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
        }
}

/*----------------------------------------------------------------*/

static struct target_type era_target = {
        .name = "era",
        .version = {1, 0, 0},
        .module = THIS_MODULE,
        .ctr = era_ctr,
        .dtr = era_dtr,
        .map = era_map,
        .postsuspend = era_postsuspend,
        .preresume = era_preresume,
        .status = era_status,
        .message = era_message,
        .iterate_devices = era_iterate_devices,
        .io_hints = era_io_hints
};

static int __init dm_era_init(void)
{
        int r;

        r = dm_register_target(&era_target);
        if (r) {
                DMERR("era target registration failed: %d", r);
                return r;
        }

        return 0;
}

static void __exit dm_era_exit(void)
{
        dm_unregister_target(&era_target);
}

module_init(dm_era_init);
module_exit(dm_era_exit);

MODULE_DESCRIPTION(DM_NAME " era target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");