/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};
static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}
static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}
static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}
/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}
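/*
 * For reference, the resulting 32-bit map entry layout (btt.h has the
 * authoritative MAP_* definitions) keeps the postmap LBA in the low bits
 * and the 'E' and 'Z' flags in the top two bits:
 *
 *   ze == 0 (normal write): both flag bits set, i.e. mapping | MAP_ENT_NORMAL
 *   ze == 1 (media error):  mapping | (1 << MAP_ERR_SHIFT)
 *   ze == 2 (zero/discard): mapping | (1 << MAP_TRIM_SHIFT)
 *
 * An all-zeroes entry is never written by this function; btt_map_read below
 * treats it as the initial state, i.e. an identity (postmap == premap)
 * mapping.
 */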
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}
static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;
static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}
static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}
static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}
/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * If the pair has never been used (the first slot's sequence number
 * is zero), it initializes that slot's sequence number to 1, so that
 * the next entry lands in the other slot.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}
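/*
 * A sketch of the sequence rules the checks above encode: valid sequence
 * numbers cycle 1 -> 2 -> 3 -> 1, so within a pair the 'newer' entry is
 * the one whose seq is one step ahead of the other in that cycle:
 *
 *   seq pair (1, 2) -> the slot holding 2 is newer, 1 is old
 *   seq pair (2, 3) -> the slot holding 3 is newer, 2 is old
 *   seq pair (3, 1) -> the slot holding 1 is newer (wrap-around), 3 is old
 *
 * Equal sequence numbers, or a pair summing to more than 5 (the largest
 * valid pair being 2 + 3), cannot be produced by btt_flog_write and are
 * rejected as corruption.
 */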
/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane, log.ent[arena->log_index[0]].seq,
			log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}
/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
			(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}
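/*
 * Note the ordering the split enforces: 'seq' lives in the second half of
 * the 16B entry (lba and old_map come first, new_map and seq second), so an
 * entry only becomes the 'newest' of its pair once the second 8B write
 * lands. A power failure between the two halves leaves the old sequence
 * number in place, and recovery simply treats the torn slot as the older
 * entry.
 */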
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}
/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
	}

 free:
	kfree(zerobuf);

	return ret;
}
/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);

	return ret;
}
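/*
 * After initialization, lane i's log group therefore reads
 * { lba = i, old_map = new_map = external_nlba + i, seq = LOG_SEQ_INIT }:
 * the nfree internal blocks just past the external LBA space seed the
 * free list, one per lane, and old_map == new_map marks the entry as one
 * that has never recorded a real transaction.
 */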
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}
static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_read_map is stripped of any flag bits,
		 * so use the stripped out versions from the log as well for
		 * testing whether recovery is needed. For restoration, use the
		 * 'raw' version of the log entries as that captured what we
		 * were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}
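/*
 * To summarize the recovery decision above, comparing stripped values:
 *
 *   map_entry == log_newmap -> the last map write completed; nothing to do
 *   map_entry == log_oldmap -> the flog committed but the map write was
 *                              lost; replay it by writing new_map
 *   anything else           -> the entry was not part of an interrupted
 *                              transaction; leave it alone
 */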
static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
			&& (ent->seq == 0);
}
/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
							(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial state.
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}
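/*
 * The two permutations accepted above correspond to the known on-media
 * layouts (described at length in btt.h): (0, 2) for the padded layout,
 * where each 16B log entry is followed by its own padding slot, and (0, 1)
 * for the legacy layout, where the two entries sit back-to-back and the
 * padding occupies slots 2 and 3.
 */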
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}
static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}
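/*
 * For reference, the resulting arena layout in increasing offset order:
 *
 *   infooff  -> one BTT_PG_SIZE info block
 *   dataoff  -> internal_nlba data blocks of internal_lbasize bytes
 *   mapoff   -> one MAP_ENT_SIZE map entry per external LBA
 *   logoff   -> nfree log groups of LOG_GRP_SIZE bytes
 *   info2off -> backup copy of the info block
 */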
static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}
/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}
static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		mutex_init(&arena->err_lock);
		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}

	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}
static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}
/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}
/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}
static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}
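/*
 * A non-zero difference between the advertised lbasize and the 512/4K
 * sector size is per-sector metadata space: btt_blk_init registers it with
 * the block integrity framework via nd_integrity_init, and btt_rw_integrity
 * below reads/writes it just past each sector's data in the namespace.
 */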
/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}
/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}
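/*
 * Note the striping in the index calculation above: all map entries that
 * share an L1 cache line (L1_CACHE_BYTES / MAP_ENT_SIZE adjacent premap
 * LBAs) hash to the same lock, spread across the nfree locks, so writers
 * to nearby premap entries serialize while writers to distant ones can
 * proceed in parallel.
 */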
static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
						&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1,
						NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);

	return ret;
}
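/*
 * To recap the read-side protocol: a reader publishes the postmap block it
 * is about to touch in rtt[lane], then re-reads the map entry to confirm
 * the mapping did not change in between. Writers (btt_write_pg below) spin
 * on the RTT before handing out a free block, so a block is never
 * overwritten while a read of it may still be in flight.
 */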
/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
			NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}
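/*
 * The write path above is the BTT atomicity story in miniature: data lands
 * in a free block first, the flog records the intended old->new map
 * transition next, and only then is the map entry updated. A crash at any
 * point either leaves the old mapping fully intact or is repaired from the
 * flog by btt_freelist_init on the next start-up.
 */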
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			bool is_write, sector_t sector)
{
	int ret;

	if (!is_write) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}
static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				op_is_write(bio_op(bio)), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d,\n",
					(op_is_write(bio_op(bio))) ? "WRITE" :
					"READ",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, bool is_write)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;
	unsigned int len;

	len = hpage_nr_pages(page) * PAGE_SIZE;
	rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}
static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}
static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};
static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	revalidate_disk(btt->btt_disk);

	return 0;
}
static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}
/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region, used to acquire lanes for parallel requests
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}
/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);