2 * Copyright (C) 2010-2011 Neil Brown
3 * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
5 * This file is released under the GPL.
8 #include <linux/slab.h>
9 #include <linux/module.h>
17 #include <linux/device-mapper.h>
19 #define DM_MSG_PREFIX "raid"
20 #define MAX_RAID_DEVICES 253 /* md-raid kernel limit */
23 * Minimum sectors of free reshape space per raid device
25 #define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)
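/*
 * Added note (worked arithmetic, not in the original source): to_sector()
 * divides a byte count by the 512-byte sector size, so to_sector(4*4096)
 * = 16384 >> 9 = 32 sectors (16 KiB, i.e. four 4 KiB pages) per raid device.
 */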
28 * Minimum journal space 4 MiB in sectors.
30 #define MIN_RAID456_JOURNAL_SPACE (4*2048)
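/* Added note: 4 * 2048 sectors = 8192 sectors * 512 bytes = 4 MiB. */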
32 static bool devices_handle_discard_safely = false;
35 * The following flags are used by dm-raid.c to set up the array state.
36 * They must be cleared before md_run is called.
38 #define FirstUse 10 /* rdev flag */
42 * Two DM devices, one to hold metadata and one to hold the
43 * actual data/parity. The reason for this is to not confuse
44 * ti->len and give more flexibility in altering size and
47 * While it is possible for this device to be associated
48 * with a different physical device than the data_dev, it
49 * is intended for it to be the same.
50 * |--------- Physical Device ---------|
51 * |- meta_dev -|------ data_dev ------|
53 struct dm_dev *meta_dev;
54 struct dm_dev *data_dev;
59 * Bits for establishing rs->ctr_flags
64 #define __CTR_FLAG_SYNC 0 /* 1 */ /* Not with raid0! */
65 #define __CTR_FLAG_NOSYNC 1 /* 1 */ /* Not with raid0! */
66 #define __CTR_FLAG_REBUILD 2 /* 2 */ /* Not with raid0! */
67 #define __CTR_FLAG_DAEMON_SLEEP 3 /* 2 */ /* Not with raid0! */
68 #define __CTR_FLAG_MIN_RECOVERY_RATE 4 /* 2 */ /* Not with raid0! */
69 #define __CTR_FLAG_MAX_RECOVERY_RATE 5 /* 2 */ /* Not with raid0! */
70 #define __CTR_FLAG_MAX_WRITE_BEHIND 6 /* 2 */ /* Only with raid1! */
71 #define __CTR_FLAG_WRITE_MOSTLY 7 /* 2 */ /* Only with raid1! */
72 #define __CTR_FLAG_STRIPE_CACHE 8 /* 2 */ /* Only with raid4/5/6! */
73 #define __CTR_FLAG_REGION_SIZE 9 /* 2 */ /* Not with raid0! */
74 #define __CTR_FLAG_RAID10_COPIES 10 /* 2 */ /* Only with raid10 */
75 #define __CTR_FLAG_RAID10_FORMAT 11 /* 2 */ /* Only with raid10 */
77 #define __CTR_FLAG_DELTA_DISKS 12 /* 2 */ /* Only with reshapable raid1/4/5/6/10! */
78 #define __CTR_FLAG_DATA_OFFSET 13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
79 #define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */
82 #define __CTR_FLAG_JOURNAL_DEV 15 /* 2 */ /* Only with raid4/5/6 (journal device)! */
85 #define __CTR_FLAG_JOURNAL_MODE 16 /* 2 */ /* Only with raid4/5/6 (journal mode)! */
88 * Flags for rs->ctr_flags field.
90 #define CTR_FLAG_SYNC (1 << __CTR_FLAG_SYNC)
91 #define CTR_FLAG_NOSYNC (1 << __CTR_FLAG_NOSYNC)
92 #define CTR_FLAG_REBUILD (1 << __CTR_FLAG_REBUILD)
93 #define CTR_FLAG_DAEMON_SLEEP (1 << __CTR_FLAG_DAEMON_SLEEP)
94 #define CTR_FLAG_MIN_RECOVERY_RATE (1 << __CTR_FLAG_MIN_RECOVERY_RATE)
95 #define CTR_FLAG_MAX_RECOVERY_RATE (1 << __CTR_FLAG_MAX_RECOVERY_RATE)
96 #define CTR_FLAG_MAX_WRITE_BEHIND (1 << __CTR_FLAG_MAX_WRITE_BEHIND)
97 #define CTR_FLAG_WRITE_MOSTLY (1 << __CTR_FLAG_WRITE_MOSTLY)
98 #define CTR_FLAG_STRIPE_CACHE (1 << __CTR_FLAG_STRIPE_CACHE)
99 #define CTR_FLAG_REGION_SIZE (1 << __CTR_FLAG_REGION_SIZE)
100 #define CTR_FLAG_RAID10_COPIES (1 << __CTR_FLAG_RAID10_COPIES)
101 #define CTR_FLAG_RAID10_FORMAT (1 << __CTR_FLAG_RAID10_FORMAT)
102 #define CTR_FLAG_DELTA_DISKS (1 << __CTR_FLAG_DELTA_DISKS)
103 #define CTR_FLAG_DATA_OFFSET (1 << __CTR_FLAG_DATA_OFFSET)
104 #define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
105 #define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV)
106 #define CTR_FLAG_JOURNAL_MODE (1 << __CTR_FLAG_JOURNAL_MODE)
108 #define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET)
111 * Definitions of various constructor flags to
112 * be used in checks of valid / invalid flags
115 /* Define all sync-related flags */
116 #define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)
118 /* Define flags for options without argument (e.g. 'nosync') */
119 #define CTR_FLAG_OPTIONS_NO_ARGS (CTR_FLAGS_ANY_SYNC | \
120 CTR_FLAG_RAID10_USE_NEAR_SETS)
122 /* Define flags for options with one argument (e.g. 'delta_disks +2') */
123 #define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
124 CTR_FLAG_WRITE_MOSTLY | \
125 CTR_FLAG_DAEMON_SLEEP | \
126 CTR_FLAG_MIN_RECOVERY_RATE | \
127 CTR_FLAG_MAX_RECOVERY_RATE | \
128 CTR_FLAG_MAX_WRITE_BEHIND | \
129 CTR_FLAG_STRIPE_CACHE | \
130 CTR_FLAG_REGION_SIZE | \
131 CTR_FLAG_RAID10_COPIES | \
132 CTR_FLAG_RAID10_FORMAT | \
133 CTR_FLAG_DELTA_DISKS | \
134 CTR_FLAG_DATA_OFFSET)
136 /* Valid options definitions per raid level... */
138 /* "raid0" does only accept data offset */
139 #define RAID0_VALID_FLAGS (CTR_FLAG_DATA_OFFSET)
141 /* "raid1" does not accept stripe cache, data offset, delta_disks or any raid10 options */
142 #define RAID1_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
144 CTR_FLAG_WRITE_MOSTLY | \
145 CTR_FLAG_DAEMON_SLEEP | \
146 CTR_FLAG_MIN_RECOVERY_RATE | \
147 CTR_FLAG_MAX_RECOVERY_RATE | \
148 CTR_FLAG_MAX_WRITE_BEHIND | \
149 CTR_FLAG_REGION_SIZE | \
150 CTR_FLAG_DELTA_DISKS | \
151 CTR_FLAG_DATA_OFFSET)
153 /* "raid10" does not accept any raid1 or stripe cache options */
154 #define RAID10_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
156 CTR_FLAG_DAEMON_SLEEP | \
157 CTR_FLAG_MIN_RECOVERY_RATE | \
158 CTR_FLAG_MAX_RECOVERY_RATE | \
159 CTR_FLAG_REGION_SIZE | \
160 CTR_FLAG_RAID10_COPIES | \
161 CTR_FLAG_RAID10_FORMAT | \
162 CTR_FLAG_DELTA_DISKS | \
163 CTR_FLAG_DATA_OFFSET | \
164 CTR_FLAG_RAID10_USE_NEAR_SETS)
167 * "raid4/5/6" do not accept any raid1 or raid10 specific options
169 * "raid6" does not accept "nosync", because it is not guaranteed
170 * that both parity and q-syndrome are being written properly with any writes.
173 #define RAID45_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
175 CTR_FLAG_DAEMON_SLEEP | \
176 CTR_FLAG_MIN_RECOVERY_RATE | \
177 CTR_FLAG_MAX_RECOVERY_RATE | \
178 CTR_FLAG_STRIPE_CACHE | \
179 CTR_FLAG_REGION_SIZE | \
180 CTR_FLAG_DELTA_DISKS | \
181 CTR_FLAG_DATA_OFFSET | \
182 CTR_FLAG_JOURNAL_DEV | \
183 CTR_FLAG_JOURNAL_MODE)
185 #define RAID6_VALID_FLAGS (CTR_FLAG_SYNC | \
187 CTR_FLAG_DAEMON_SLEEP | \
188 CTR_FLAG_MIN_RECOVERY_RATE | \
189 CTR_FLAG_MAX_RECOVERY_RATE | \
190 CTR_FLAG_STRIPE_CACHE | \
191 CTR_FLAG_REGION_SIZE | \
192 CTR_FLAG_DELTA_DISKS | \
193 CTR_FLAG_DATA_OFFSET | \
194 CTR_FLAG_JOURNAL_DEV | \
195 CTR_FLAG_JOURNAL_MODE)
196 /* ...valid options definitions per raid level */
199 * Flags for rs->runtime_flags field
200 * (RT_FLAG prefix meaning "runtime flag")
202 * These are all internal and used to define runtime state,
203 * e.g. to prevent another resume from preresume processing
204 * the raid set all over again.
206 #define RT_FLAG_RS_PRERESUMED 0
207 #define RT_FLAG_RS_RESUMED 1
208 #define RT_FLAG_RS_BITMAP_LOADED 2
209 #define RT_FLAG_UPDATE_SBS 3
210 #define RT_FLAG_RESHAPE_RS 4
211 #define RT_FLAG_RS_SUSPENDED 5
213 /* Array elements of 64 bit needed for rebuild/failed disk bits */
214 #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
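/*
 * Added note, worked through: with MAX_RAID_DEVICES = 253 this evaluates
 * to (253 + 63) / 8 / 8 = 4 elements of 64 bits each, i.e. 256 bits,
 * enough for one bit per possible raid device.
 */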
217 * raid set level, layout and chunk sectors backup/restore
222 int new_chunk_sectors;
226 struct dm_target *ti;
228 uint32_t bitmap_loaded;
229 uint32_t stripe_cache_entries;
230 unsigned long ctr_flags;
231 unsigned long runtime_flags;
233 uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];
239 int requested_bitmap_chunk_sectors;
242 struct raid_type *raid_type;
243 struct dm_target_callbacks callbacks;
245 /* Optional raid4/5/6 journal device */
252 struct raid_dev dev[0];
255 static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
257 struct mddev *mddev = &rs->md;
259 l->new_level = mddev->new_level;
260 l->new_layout = mddev->new_layout;
261 l->new_chunk_sectors = mddev->new_chunk_sectors;
264 static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
266 struct mddev *mddev = &rs->md;
268 mddev->new_level = l->new_level;
269 mddev->new_layout = l->new_layout;
270 mddev->new_chunk_sectors = l->new_chunk_sectors;
273 /* raid10 algorithms (i.e. formats) */
274 #define ALGORITHM_RAID10_DEFAULT 0
275 #define ALGORITHM_RAID10_NEAR 1
276 #define ALGORITHM_RAID10_OFFSET 2
277 #define ALGORITHM_RAID10_FAR 3
279 /* Supported raid types and properties. */
280 static struct raid_type {
281 const char *name; /* RAID algorithm. */
282 const char *descr; /* Descriptor text for logging. */
283 const unsigned int parity_devs; /* # of parity devices. */
284 const unsigned int minimal_devs;/* minimal # of devices in set. */
285 const unsigned int level; /* RAID level. */
286 const unsigned int algorithm; /* RAID algorithm. */
288 {"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */},
289 {"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */},
290 {"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR},
291 {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
292 {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
293 {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
294 {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
295 {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
296 {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
297 {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
298 {"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
299 {"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
300 {"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
301 {"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
302 {"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE},
303 {"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6},
304 {"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6},
305 {"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6},
306 {"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6},
307 {"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6}
310 /* True, if @v is in inclusive range [@min, @max] */
311 static bool __within_range(long v, long min, long max)
313 return v >= min && v <= max;
316 /* All table line arguments are defined here */
317 static struct arg_name_flag {
318 const unsigned long flag;
320 } __arg_name_flags[] = {
321 { CTR_FLAG_SYNC, "sync"},
322 { CTR_FLAG_NOSYNC, "nosync"},
323 { CTR_FLAG_REBUILD, "rebuild"},
324 { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
325 { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
326 { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
327 { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
328 { CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
329 { CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
330 { CTR_FLAG_REGION_SIZE, "region_size"},
331 { CTR_FLAG_RAID10_COPIES, "raid10_copies"},
332 { CTR_FLAG_RAID10_FORMAT, "raid10_format"},
333 { CTR_FLAG_DATA_OFFSET, "data_offset"},
334 { CTR_FLAG_DELTA_DISKS, "delta_disks"},
335 { CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
336 { CTR_FLAG_JOURNAL_DEV, "journal_dev" },
337 { CTR_FLAG_JOURNAL_MODE, "journal_mode" },
340 /* Return argument name string for given @flag */
341 static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
343 if (hweight32(flag) == 1) {
344 struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);
346 while (anf-- > __arg_name_flags)
347 if (flag & anf->flag)
351 DMERR("%s called with more than one flag!", __func__);
356 /* Define correlation of raid456 journal cache modes and dm-raid target line parameters */
360 } _raid456_journal_mode[] = {
361 { R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" },
362 { R5C_JOURNAL_MODE_WRITE_BACK , "writeback" }
365 /* Return MD raid4/5/6 journal mode for dm @journal_mode one */
366 static int dm_raid_journal_mode_to_md(const char *mode)
368 int m = ARRAY_SIZE(_raid456_journal_mode);
371 if (!strcasecmp(mode, _raid456_journal_mode[m].param))
372 return _raid456_journal_mode[m].mode;
377 /* Return dm-raid raid4/5/6 journal mode string for @mode */
378 static const char *md_journal_mode_to_dm_raid(const int mode)
380 int m = ARRAY_SIZE(_raid456_journal_mode);
383 if (mode == _raid456_journal_mode[m].mode)
384 return _raid456_journal_mode[m].param;
390 * Bool helpers to test for various raid levels of a raid set.
391 * It's the level as reported by the superblock rather than
392 * the requested raid_type passed to the constructor.
394 /* Return true, if raid set in @rs is raid0 */
395 static bool rs_is_raid0(struct raid_set *rs)
397 return !rs->md.level;
400 /* Return true, if raid set in @rs is raid1 */
401 static bool rs_is_raid1(struct raid_set *rs)
403 return rs->md.level == 1;
406 /* Return true, if raid set in @rs is raid10 */
407 static bool rs_is_raid10(struct raid_set *rs)
409 return rs->md.level == 10;
412 /* Return true, if raid set in @rs is level 6 */
413 static bool rs_is_raid6(struct raid_set *rs)
415 return rs->md.level == 6;
418 /* Return true, if raid set in @rs is level 4, 5 or 6 */
419 static bool rs_is_raid456(struct raid_set *rs)
421 return __within_range(rs->md.level, 4, 6);
424 /* Return true, if raid set in @rs is reshapable */
425 static bool __is_raid10_far(int layout);
426 static bool rs_is_reshapable(struct raid_set *rs)
428 return rs_is_raid456(rs) ||
429 (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
432 /* Return true, if raid set in @rs is recovering */
433 static bool rs_is_recovering(struct raid_set *rs)
435 return rs->md.recovery_cp < rs->md.dev_sectors;
438 /* Return true, if raid set in @rs is reshaping */
439 static bool rs_is_reshaping(struct raid_set *rs)
441 return rs->md.reshape_position != MaxSector;
445 * bool helpers to test for various raid levels of a raid type @rt
448 /* Return true, if raid type in @rt is raid0 */
449 static bool rt_is_raid0(struct raid_type *rt)
454 /* Return true, if raid type in @rt is raid1 */
455 static bool rt_is_raid1(struct raid_type *rt)
457 return rt->level == 1;
460 /* Return true, if raid type in @rt is raid10 */
461 static bool rt_is_raid10(struct raid_type *rt)
463 return rt->level == 10;
466 /* Return true, if raid type in @rt is raid4/5 */
467 static bool rt_is_raid45(struct raid_type *rt)
469 return __within_range(rt->level, 4, 5);
472 /* Return true, if raid type in @rt is raid6 */
473 static bool rt_is_raid6(struct raid_type *rt)
475 return rt->level == 6;
478 /* Return true, if raid type in @rt is raid4/5/6 */
479 static bool rt_is_raid456(struct raid_type *rt)
481 return __within_range(rt->level, 4, 6);
483 /* END: raid level bools */
485 /* Return valid ctr flags for the raid level of @rs */
486 static unsigned long __valid_flags(struct raid_set *rs)
488 if (rt_is_raid0(rs->raid_type))
489 return RAID0_VALID_FLAGS;
490 else if (rt_is_raid1(rs->raid_type))
491 return RAID1_VALID_FLAGS;
492 else if (rt_is_raid10(rs->raid_type))
493 return RAID10_VALID_FLAGS;
494 else if (rt_is_raid45(rs->raid_type))
495 return RAID45_VALID_FLAGS;
496 else if (rt_is_raid6(rs->raid_type))
497 return RAID6_VALID_FLAGS;
503 * Check for valid flags set on @rs
505 * Has to be called after parsing of the ctr flags!
507 static int rs_check_for_valid_flags(struct raid_set *rs)
509 if (rs->ctr_flags & ~__valid_flags(rs)) {
510 rs->ti->error = "Invalid flags combination";
517 /* MD raid10 bit definitions and helpers */
518 #define RAID10_OFFSET (1 << 16) /* stripes with data copies are adjacent on devices */
519 #define RAID10_BROCKEN_USE_FAR_SETS (1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
520 #define RAID10_USE_FAR_SETS (1 << 18) /* Use sets instead of whole stripe rotation */
521 #define RAID10_FAR_COPIES_SHIFT 8 /* raid10 # far copies shift (2nd byte of layout) */
523 /* Return md raid10 near copies for @layout */
524 static unsigned int __raid10_near_copies(int layout)
526 return layout & 0xFF;
529 /* Return md raid10 far copies for @layout */
530 static unsigned int __raid10_far_copies(int layout)
532 return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
535 /* Return true if md raid10 offset for @layout */
536 static bool __is_raid10_offset(int layout)
538 return !!(layout & RAID10_OFFSET);
541 /* Return true if md raid10 near for @layout */
542 static bool __is_raid10_near(int layout)
544 return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
547 /* Return true if md raid10 far for @layout */
548 static bool __is_raid10_far(int layout)
550 return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
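/*
 * Added decode example: layout 0x102 -> near copies = 0x102 & 0xFF = 2,
 * far copies = (0x102 >> 8) & 0xFF = 1, offset bit clear, i.e. "near"
 * format with two data copies; layout 0x40201 -> near = 1, far = 2 with
 * RAID10_USE_FAR_SETS set, i.e. "far" format.
 */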
553 /* Return md raid10 layout string for @layout */
554 static const char *raid10_md_layout_to_format(int layout)
557 * Bit 16 stands for "offset"
558 * (i.e. adjacent stripes hold copies)
560 * Refer to MD's raid10.c for details
562 if (__is_raid10_offset(layout))
565 if (__raid10_near_copies(layout) > 1)
568 if (__raid10_far_copies(layout) > 1)
574 /* Return md raid10 algorithm for @name */
575 static int raid10_name_to_format(const char *name)
577 if (!strcasecmp(name, "near"))
578 return ALGORITHM_RAID10_NEAR;
579 else if (!strcasecmp(name, "offset"))
580 return ALGORITHM_RAID10_OFFSET;
581 else if (!strcasecmp(name, "far"))
582 return ALGORITHM_RAID10_FAR;
587 /* Return md raid10 copies for @layout */
588 static unsigned int raid10_md_layout_to_copies(int layout)
590 return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
593 /* Return md raid10 format id for @format string */
594 static int raid10_format_to_md_layout(struct raid_set *rs,
595 unsigned int algorithm,
598 unsigned int n = 1, f = 1, r = 0;
601 * MD resilience flaw:
603 * enabling use_far_sets for far/offset formats causes copies
604 * to be colocated on the same devs together with their origins!
606 * -> disable it for now in the definition above
608 if (algorithm == ALGORITHM_RAID10_DEFAULT ||
609 algorithm == ALGORITHM_RAID10_NEAR)
612 else if (algorithm == ALGORITHM_RAID10_OFFSET) {
615 if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
616 r |= RAID10_USE_FAR_SETS;
618 } else if (algorithm == ALGORITHM_RAID10_FAR) {
621 if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
622 r |= RAID10_USE_FAR_SETS;
627 return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
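/*
 * Added encode example: "near" with raid10_copies = 2 gives n = 2, f = 1,
 * r = 0 -> (1 << 8) | 2 = 0x102; "far" with 2 copies and no
 * raid10_use_near_sets gives n = 1, f = 2, r = RAID10_USE_FAR_SETS
 * -> 0x40000 | (2 << 8) | 1 = 0x40201.
 */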
629 /* END: MD raid10 bit definitions and helpers */
631 /* Check for any of the raid10 algorithms */
632 static bool __got_raid10(struct raid_type *rtp, const int layout)
634 if (rtp->level == 10) {
635 switch (rtp->algorithm) {
636 case ALGORITHM_RAID10_DEFAULT:
637 case ALGORITHM_RAID10_NEAR:
638 return __is_raid10_near(layout);
639 case ALGORITHM_RAID10_OFFSET:
640 return __is_raid10_offset(layout);
641 case ALGORITHM_RAID10_FAR:
642 return __is_raid10_far(layout);
651 /* Return raid_type for @name */
652 static struct raid_type *get_raid_type(const char *name)
654 struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);
656 while (rtp-- > raid_types)
657 if (!strcasecmp(rtp->name, name))
663 /* Return raid_type derived from @level and @layout */
664 static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
666 struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);
668 while (rtp-- > raid_types) {
669 /* RAID10 special checks based on @layout flags/properties */
670 if (rtp->level == level &&
671 (__got_raid10(rtp, layout) || rtp->algorithm == layout))
678 /* Adjust rdev sectors */
679 static void rs_set_rdev_sectors(struct raid_set *rs)
681 struct mddev *mddev = &rs->md;
682 struct md_rdev *rdev;
685 * raid10 sets rdev->sectors to the device size, which
686 * is unintended in case of out-of-place reshaping
688 rdev_for_each(rdev, mddev)
689 if (!test_bit(Journal, &rdev->flags))
690 rdev->sectors = mddev->dev_sectors;
694 * Change bdev capacity of @rs in case of a disk add/remove reshape
696 static void rs_set_capacity(struct raid_set *rs)
698 struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
700 set_capacity(gendisk, rs->md.array_sectors);
701 revalidate_disk(gendisk);
705 * Set the mddev properties in @rs to the current
706 * ones retrieved from the freshest superblock
708 static void rs_set_cur(struct raid_set *rs)
710 struct mddev *mddev = &rs->md;
712 mddev->new_level = mddev->level;
713 mddev->new_layout = mddev->layout;
714 mddev->new_chunk_sectors = mddev->chunk_sectors;
718 * Set the mddev properties in @rs to the new
719 * ones requested by the ctr
721 static void rs_set_new(struct raid_set *rs)
723 struct mddev *mddev = &rs->md;
725 mddev->level = mddev->new_level;
726 mddev->layout = mddev->new_layout;
727 mddev->chunk_sectors = mddev->new_chunk_sectors;
728 mddev->raid_disks = rs->raid_disks;
729 mddev->delta_disks = 0;
732 static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
733 unsigned int raid_devs)
738 if (raid_devs <= raid_type->parity_devs) {
739 ti->error = "Insufficient number of devices";
740 return ERR_PTR(-EINVAL);
743 rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
745 ti->error = "Cannot allocate raid context";
746 return ERR_PTR(-ENOMEM);
751 rs->raid_disks = raid_devs;
755 rs->raid_type = raid_type;
756 rs->stripe_cache_entries = 256;
757 rs->md.raid_disks = raid_devs;
758 rs->md.level = raid_type->level;
759 rs->md.new_level = rs->md.level;
760 rs->md.layout = raid_type->algorithm;
761 rs->md.new_layout = rs->md.layout;
762 rs->md.delta_disks = 0;
763 rs->md.recovery_cp = MaxSector;
765 for (i = 0; i < raid_devs; i++)
766 md_rdev_init(&rs->dev[i].rdev);
769 * Remaining items to be initialized by further RAID params:
772 * rs->md.chunk_sectors
773 * rs->md.new_chunk_sectors
780 static void raid_set_free(struct raid_set *rs)
784 if (rs->journal_dev.dev) {
785 md_rdev_clear(&rs->journal_dev.rdev);
786 dm_put_device(rs->ti, rs->journal_dev.dev);
789 for (i = 0; i < rs->raid_disks; i++) {
790 if (rs->dev[i].meta_dev)
791 dm_put_device(rs->ti, rs->dev[i].meta_dev);
792 md_rdev_clear(&rs->dev[i].rdev);
793 if (rs->dev[i].data_dev)
794 dm_put_device(rs->ti, rs->dev[i].data_dev);
801 * For every device we have two words
802 * <meta_dev>: meta device name or '-' if missing
803 * <data_dev>: data device name or '-' if missing
805 * The following are permitted:
808 * <meta_dev> <data_dev>
810 * The following is not allowed:
813 * This code parses those words. If there is a failure,
814 * the caller must use raid_set_free() to unwind the operations.
816 static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
820 int metadata_available = 0;
824 /* Skip over the number of raid devices argument to get to the dev pairs */
825 arg = dm_shift_arg(as);
829 for (i = 0; i < rs->raid_disks; i++) {
830 rs->dev[i].rdev.raid_disk = i;
832 rs->dev[i].meta_dev = NULL;
833 rs->dev[i].data_dev = NULL;
836 * There are no offsets initially.
837 * Out of place reshape will set them accordingly.
839 rs->dev[i].rdev.data_offset = 0;
840 rs->dev[i].rdev.new_data_offset = 0;
841 rs->dev[i].rdev.mddev = &rs->md;
843 arg = dm_shift_arg(as);
847 if (strcmp(arg, "-")) {
848 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
849 &rs->dev[i].meta_dev);
851 rs->ti->error = "RAID metadata device lookup failure";
855 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
856 if (!rs->dev[i].rdev.sb_page) {
857 rs->ti->error = "Failed to allocate superblock page";
862 arg = dm_shift_arg(as);
866 if (!strcmp(arg, "-")) {
867 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
868 (!rs->dev[i].rdev.recovery_offset)) {
869 rs->ti->error = "Drive designated for rebuild not specified";
873 if (rs->dev[i].meta_dev) {
874 rs->ti->error = "No data device supplied with metadata device";
881 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
882 &rs->dev[i].data_dev);
884 rs->ti->error = "RAID device lookup failure";
888 if (rs->dev[i].meta_dev) {
889 metadata_available = 1;
890 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
892 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
893 list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
894 if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
898 if (rs->journal_dev.dev)
899 list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);
901 if (metadata_available) {
903 rs->md.persistent = 1;
904 rs->md.major_version = 2;
905 } else if (rebuild && !rs->md.recovery_cp) {
907 * Without metadata, we will not be able to tell if the array
908 * is in-sync or not - we must assume it is not. Therefore,
909 * it is impossible to rebuild a drive.
911 * Even if there is metadata, the on-disk information may
912 * indicate that the array is not in-sync and it will then
915 * User could specify 'nosync' option if desperate.
917 rs->ti->error = "Unable to rebuild drive while array is not in-sync";
925 * validate_region_size
927 * @region_size: region size in sectors. If 0, pick a size (4MiB default).
929 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
930 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
932 * Returns: 0 on success, -EINVAL on failure.
934 static int validate_region_size(struct raid_set *rs, unsigned long region_size)
936 unsigned long min_region_size = rs->ti->len / (1 << 21);
943 * Choose a reasonable default. All figures in sectors.
945 if (min_region_size > (1 << 13)) {
946 /* If not a power of 2, make it the next power of 2 */
947 region_size = roundup_pow_of_two(min_region_size);
948 DMINFO("Choosing default region size of %lu sectors",
951 DMINFO("Choosing default region size of 4MiB");
952 region_size = 1 << 13; /* sectors */
956 * Validate user-supplied value.
958 if (region_size > rs->ti->len) {
959 rs->ti->error = "Supplied region size is too large";
963 if (region_size < min_region_size) {
964 DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
965 region_size, min_region_size);
966 rs->ti->error = "Supplied region size is too small";
970 if (!is_power_of_2(region_size)) {
971 rs->ti->error = "Region size is not a power of 2";
975 if (region_size < rs->md.chunk_sectors) {
976 rs->ti->error = "Region size is smaller than the chunk size";
982 * Convert sectors to bytes.
984 rs->md.bitmap_info.chunksize = to_bytes(region_size);
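/*
 * Added worked example: for ti->len = 1 << 31 sectors (1 TiB),
 * min_region_size = 1 << 10 sectors, so the 4 MiB default (1 << 13 sectors)
 * is chosen; for ti->len = 1 << 35 sectors (16 TiB), min_region_size =
 * 1 << 14 exceeds 1 << 13, so roundup_pow_of_two(1 << 14) = 16384 sectors
 * (8 MiB) is chosen instead.
 */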
990 * validate_raid_redundancy
993 * Determine if there are enough devices in the array that haven't
994 * failed (or are being rebuilt) to form a usable array.
996 * Returns: 0 on success, -EINVAL on failure.
998 static int validate_raid_redundancy(struct raid_set *rs)
1000 unsigned int i, rebuild_cnt = 0;
1001 unsigned int rebuilds_per_group = 0, copies, raid_disks;
1002 unsigned int group_size, last_group_start;
1004 for (i = 0; i < rs->raid_disks; i++)
1005 if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
1006 ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
1007 !rs->dev[i].rdev.sb_page)))
1010 switch (rs->raid_type->level) {
1014 if (rebuild_cnt >= rs->md.raid_disks)
1020 if (rebuild_cnt > rs->raid_type->parity_devs)
1024 copies = raid10_md_layout_to_copies(rs->md.new_layout);
1025 if (rebuild_cnt < copies)
1029 * It is possible to have a higher rebuild count for RAID10,
1030 * as long as the failed devices occur in different mirror
1031 * groups (i.e. different stripes).
1033 * When checking "near" format, make sure no adjacent devices
1034 * have failed beyond what can be handled. In addition to the
1035 * simple case where the number of devices is a multiple of the
1036 * number of copies, we must also handle cases where the number
1037 * of devices is not a multiple of the number of copies.
1038 * E.g. dev1 dev2 dev3 dev4 dev5
1042 raid_disks = min(rs->raid_disks, rs->md.raid_disks);
1043 if (__is_raid10_near(rs->md.new_layout)) {
1044 for (i = 0; i < raid_disks; i++) {
1046 rebuilds_per_group = 0;
1047 if ((!rs->dev[i].rdev.sb_page ||
1048 !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
1049 (++rebuilds_per_group >= copies))
1056 * When checking "far" and "offset" formats, we need to ensure
1057 * that the device that holds its copy is not also dead or
1058 * being rebuilt. (Note that "far" and "offset" formats only
1059 * support two copies right now. These formats also only ever
1060 * use the 'use_far_sets' variant.)
1062 * This check is somewhat complicated by the need to account
1063 * for arrays that are not a multiple of (far) copies. This
1064 * results in the need to treat the last (potentially larger) set differently.
1067 group_size = (raid_disks / copies);
1068 last_group_start = (raid_disks / group_size) - 1;
1069 last_group_start *= group_size;
1070 for (i = 0; i < raid_disks; i++) {
1071 if (!(i % copies) && !(i > last_group_start))
1072 rebuilds_per_group = 0;
1073 if ((!rs->dev[i].rdev.sb_page ||
1074 !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
1075 (++rebuilds_per_group >= copies))
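/*
 * Added worked example: raid_disks = 5 with copies = 2 gives group_size = 2
 * and last_group_start = 2, i.e. device sets {0,1} and {2,3,4}; the check
 * fails as soon as two devices within one set need rebuild.
 */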
1091 * Possible arguments are...
1092 * <chunk_size> [optional_args]
1094 * Argument definitions
1095 * <chunk_size> The number of sectors per disk that
1096 * will form the "stripe"
1097 * [[no]sync] Force or prevent recovery of the entire array
1099 * [rebuild <idx>] Rebuild the drive indicated by the index
1100 * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits
1102 * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
1103 * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
1104 * [write_mostly <idx>] Indicate a write mostly drive via index
1105 * [max_write_behind <sectors>] See '--write-behind=' (man mdadm)
1106 * [stripe_cache <sectors>] Stripe cache size for higher RAIDs
1107 * [region_size <sectors>] Defines granularity of bitmap
1108 * [journal_dev <dev>] raid4/5/6 journaling device
1109 * (i.e. write hole closing log)
1111 * RAID10-only options:
1112 * [raid10_copies <# copies>] Number of copies. (Default: 2)
1113 * [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
1115 static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
1116 unsigned int num_raid_params)
1118 int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
1119 unsigned int raid10_copies = 2;
1120 unsigned int i, write_mostly = 0;
1121 unsigned int region_size = 0;
1122 sector_t max_io_len;
1123 const char *arg, *key;
1124 struct raid_dev *rd;
1125 struct raid_type *rt = rs->raid_type;
1127 arg = dm_shift_arg(as);
1128 num_raid_params--; /* Account for chunk_size argument */
1130 if (kstrtoint(arg, 10, &value) < 0) {
1131 rs->ti->error = "Bad numerical argument given for chunk_size";
1136 * First, parse the in-order required arguments
1137 * "chunk_size" is the only argument of this type.
1139 if (rt_is_raid1(rt)) {
1141 DMERR("Ignoring chunk size parameter for RAID 1");
1143 } else if (!is_power_of_2(value)) {
1144 rs->ti->error = "Chunk size must be a power of 2";
1146 } else if (value < 8) {
1147 rs->ti->error = "Chunk size value is too small";
1151 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
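/* Added note: chunk_size is given in sectors, e.g. 128 -> 64 KiB chunks. */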
1154 * We set each individual device as In_sync with a completed
1155 * 'recovery_offset'. If there has been a device failure or
1156 * replacement then one of the following cases applies:
1158 * 1) User specifies 'rebuild'.
1159 * - Device is reset when param is read.
1160 * 2) A new device is supplied.
1161 * - No matching superblock found, resets device.
1162 * 3) Device failure was transient and returns on reload.
1163 * - Failure noticed, resets device for bitmap replay.
1164 * 4) Device hadn't completed recovery after previous failure.
1165 * - Superblock is read and overrides recovery_offset.
1167 * What is found in the superblocks of the devices is always
1168 * authoritative, unless 'rebuild' or '[no]sync' was specified.
1170 for (i = 0; i < rs->raid_disks; i++) {
1171 set_bit(In_sync, &rs->dev[i].rdev.flags);
1172 rs->dev[i].rdev.recovery_offset = MaxSector;
1176 * Second, parse the unordered optional arguments
1178 for (i = 0; i < num_raid_params; i++) {
1179 key = dm_shift_arg(as);
1181 rs->ti->error = "Not enough raid parameters given";
1185 if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
1186 if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
1187 rs->ti->error = "Only one 'nosync' argument allowed";
1192 if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
1193 if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
1194 rs->ti->error = "Only one 'sync' argument allowed";
1199 if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
1200 if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
1201 rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
1207 arg = dm_shift_arg(as);
1208 i++; /* Account for the argument pairs */
1210 rs->ti->error = "Wrong number of raid parameters given";
1215 * Parameters that take a string value are checked here.
1217 /* "raid10_format {near|offset|far} */
1218 if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
1219 if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
1220 rs->ti->error = "Only one 'raid10_format' argument pair allowed";
1223 if (!rt_is_raid10(rt)) {
1224 rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
1227 raid10_format = raid10_name_to_format(arg);
1228 if (raid10_format < 0) {
1229 rs->ti->error = "Invalid 'raid10_format' value given";
1230 return raid10_format;
1235 /* "journal_dev <dev>" */
1236 if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
1238 struct md_rdev *jdev;
1240 if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
1241 rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
1244 if (!rt_is_raid456(rt)) {
1245 rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
1248 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
1249 &rs->journal_dev.dev);
1251 rs->ti->error = "raid4/5/6 journal device lookup failure";
1254 jdev = &rs->journal_dev.rdev;
1256 jdev->mddev = &rs->md;
1257 jdev->bdev = rs->journal_dev.dev->bdev;
1258 jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
1259 if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
1260 rs->ti->error = "No space for raid4/5/6 journal";
1263 rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
1264 set_bit(Journal, &jdev->flags);
1268 /* "journal_mode <mode>" ("journal_dev" mandatory!) */
1269 if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE))) {
1272 if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
1273 rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'";
1276 if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
1277 rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed";
1280 r = dm_raid_journal_mode_to_md(arg);
1282 rs->ti->error = "Invalid 'journal_mode' argument";
1285 rs->journal_dev.mode = r;
1290 * Parameters with number values from here on.
1292 if (kstrtoint(arg, 10, &value) < 0) {
1293 rs->ti->error = "Bad numerical argument given in raid params";
1297 if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
1299 * "rebuild" is being passed in by userspace to provide
1300 * indexes of replaced devices and to set up additional
1301 * devices on raid level takeover.
1303 if (!__within_range(value, 0, rs->raid_disks - 1)) {
1304 rs->ti->error = "Invalid rebuild index given";
1308 if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
1309 rs->ti->error = "rebuild for this index already given";
1313 rd = rs->dev + value;
1314 clear_bit(In_sync, &rd->rdev.flags);
1315 clear_bit(Faulty, &rd->rdev.flags);
1316 rd->rdev.recovery_offset = 0;
1317 set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
1318 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
1319 if (!rt_is_raid1(rt)) {
1320 rs->ti->error = "write_mostly option is only valid for RAID1";
1324 if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
1325 rs->ti->error = "Invalid write_mostly index given";
1330 set_bit(WriteMostly, &rs->dev[value].rdev.flags);
1331 set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
1332 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
1333 if (!rt_is_raid1(rt)) {
1334 rs->ti->error = "max_write_behind option is only valid for RAID1";
1338 if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
1339 rs->ti->error = "Only one max_write_behind argument pair allowed";
1344 * In device-mapper, we specify things in sectors, but
1345 * MD records this value in kB
1348 if (value > COUNTER_MAX) {
1349 rs->ti->error = "Max write-behind limit out of range";
1353 rs->md.bitmap_info.max_write_behind = value;
1354 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
1355 if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
1356 rs->ti->error = "Only one daemon_sleep argument pair allowed";
1359 if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
1360 rs->ti->error = "daemon sleep period out of range";
1363 rs->md.bitmap_info.daemon_sleep = value;
1364 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
1365 /* Userspace passes new data_offset after having extended the data image LV */
1366 if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
1367 rs->ti->error = "Only one data_offset argument pair allowed";
1370 /* Ensure sensible data offset */
1372 (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
1373 rs->ti->error = "Bogus data_offset value";
1376 rs->data_offset = value;
1377 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
1378 /* Define the +/-# of disks to add to/remove from the given raid set */
1379 if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
1380 rs->ti->error = "Only one delta_disks argument pair allowed";
1383 /* Ensure we stay within MAX_RAID_DEVICES and the raid type's minimal_devs! */
1384 if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
1385 rs->ti->error = "Too many delta_disks requested";
1389 rs->delta_disks = value;
1390 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
1391 if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
1392 rs->ti->error = "Only one stripe_cache argument pair allowed";
1396 if (!rt_is_raid456(rt)) {
1397 rs->ti->error = "Inappropriate argument: stripe_cache";
1401 rs->stripe_cache_entries = value;
1402 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
1403 if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
1404 rs->ti->error = "Only one min_recovery_rate argument pair allowed";
1407 if (value > INT_MAX) {
1408 rs->ti->error = "min_recovery_rate out of range";
1411 rs->md.sync_speed_min = (int)value;
1412 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
1413 if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
1414 rs->ti->error = "Only one max_recovery_rate argument pair allowed";
1417 if (value > INT_MAX) {
1418 rs->ti->error = "max_recovery_rate out of range";
1421 rs->md.sync_speed_max = (int)value;
1422 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
1423 if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
1424 rs->ti->error = "Only one region_size argument pair allowed";
1428 region_size = value;
1429 rs->requested_bitmap_chunk_sectors = value;
1430 } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
1431 if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
1432 rs->ti->error = "Only one raid10_copies argument pair allowed";
1436 if (!__within_range(value, 2, rs->md.raid_disks)) {
1437 rs->ti->error = "Bad value for 'raid10_copies'";
1441 raid10_copies = value;
1443 DMERR("Unable to parse RAID parameter: %s", key);
1444 rs->ti->error = "Unable to parse RAID parameter";
1449 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
1450 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
1451 rs->ti->error = "sync and nosync are mutually exclusive";
1455 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
1456 (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
1457 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
1458 rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
1462 if (write_mostly >= rs->md.raid_disks) {
1463 rs->ti->error = "Can't set all raid1 devices to write_mostly";
1467 if (validate_region_size(rs, region_size))
1470 if (rs->md.chunk_sectors)
1471 max_io_len = rs->md.chunk_sectors;
1473 max_io_len = region_size;
1475 if (dm_set_target_max_io_len(rs->ti, max_io_len))
1478 if (rt_is_raid10(rt)) {
1479 if (raid10_copies > rs->md.raid_disks) {
1480 rs->ti->error = "Not enough devices to satisfy specification";
1484 rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
1485 if (rs->md.new_layout < 0) {
1486 rs->ti->error = "Error getting raid10 format";
1487 return rs->md.new_layout;
1490 rt = get_raid_type_by_ll(10, rs->md.new_layout);
1492 rs->ti->error = "Failed to recognize new raid10 layout";
1496 if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
1497 rt->algorithm == ALGORITHM_RAID10_NEAR) &&
1498 test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
1499 rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
1504 rs->raid10_copies = raid10_copies;
1506 /* Assume there are no metadata devices until the drives are parsed */
1507 rs->md.persistent = 0;
1508 rs->md.external = 1;
1510 /* Check if any invalid ctr arguments have been passed in for the raid level */
1511 return rs_check_for_valid_flags(rs);
1514 /* Set raid4/5/6 cache size */
1515 static int rs_set_raid456_stripe_cache(struct raid_set *rs)
1518 struct r5conf *conf;
1519 struct mddev *mddev = &rs->md;
1520 uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
1521 uint32_t nr_stripes = rs->stripe_cache_entries;
1523 if (!rt_is_raid456(rs->raid_type)) {
1524 rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
1528 if (nr_stripes < min_stripes) {
1529 DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
1530 nr_stripes, min_stripes);
1531 nr_stripes = min_stripes;
1534 conf = mddev->private;
1536 rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
1540 /* Try setting number of stripes in raid456 stripe cache */
1541 if (conf->min_nr_stripes != nr_stripes) {
1542 r = raid5_set_cache_size(mddev, nr_stripes);
1544 rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
1548 DMINFO("%u stripe cache entries", nr_stripes);
1554 /* Return # of data stripes as kept in the mddev of @rs (i.e. as of the superblock) */
1555 static unsigned int mddev_data_stripes(struct raid_set *rs)
1557 return rs->md.raid_disks - rs->raid_type->parity_devs;
1560 /* Return # of data stripes of @rs (i.e. as of ctr) */
1561 static unsigned int rs_data_stripes(struct raid_set *rs)
1563 return rs->raid_disks - rs->raid_type->parity_devs;
1567 * Retrieve rdev->sectors from any valid raid device of @rs
1568 * to allow userspace to pass in arbitrary "- -" device tuples.
1570 static sector_t __rdev_sectors(struct raid_set *rs)
1574 for (i = 0; i < rs->raid_disks; i++) {
1575 struct md_rdev *rdev = &rs->dev[i].rdev;
1577 if (!test_bit(Journal, &rdev->flags) &&
1578 rdev->bdev && rdev->sectors)
1579 return rdev->sectors;
1585 /* Calculate the sectors per device and per array used for @rs */
1586 static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
1589 unsigned int data_stripes;
1590 struct mddev *mddev = &rs->md;
1591 struct md_rdev *rdev;
1592 sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
1595 delta_disks = mddev->delta_disks;
1596 data_stripes = mddev_data_stripes(rs);
1598 delta_disks = rs->delta_disks;
1599 data_stripes = rs_data_stripes(rs);
1602 /* Special raid1 case w/o delta_disks support (yet) */
1603 if (rt_is_raid1(rs->raid_type))
1605 else if (rt_is_raid10(rs->raid_type)) {
1606 if (rs->raid10_copies < 2 ||
1608 rs->ti->error = "Bogus raid10 data copies or delta disks";
1612 dev_sectors *= rs->raid10_copies;
1613 if (sector_div(dev_sectors, data_stripes))
1616 array_sectors = (data_stripes + delta_disks) * dev_sectors;
1617 if (sector_div(array_sectors, rs->raid10_copies))
1620 } else if (sector_div(dev_sectors, data_stripes))
1624 /* Striped layouts */
1625 array_sectors = (data_stripes + delta_disks) * dev_sectors;
1627 rdev_for_each(rdev, mddev)
1628 if (!test_bit(Journal, &rdev->flags))
1629 rdev->sectors = dev_sectors;
1631 mddev->array_sectors = array_sectors;
1632 mddev->dev_sectors = dev_sectors;
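/*
 * Added worked example: raid5 with 4 devices has parity_devs = 1, so
 * data_stripes = 3; ti->len = 3000 sectors then gives dev_sectors = 1000
 * and array_sectors = (3 + 0) * 1000 = 3000.
 */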
1636 rs->ti->error = "Target length not divisible by number of data devices";
1640 /* Setup recovery on @rs */
1641 static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
1643 /* raid0 does not recover */
1644 if (rs_is_raid0(rs))
1645 rs->md.recovery_cp = MaxSector;
1647 * A raid6 set has to be recovered either
1648 * completely or for the grown part to
1649 * ensure proper parity and Q-Syndrome
1651 else if (rs_is_raid6(rs))
1652 rs->md.recovery_cp = dev_sectors;
1654 * Other raid set types may skip recovery
1655 * depending on the 'nosync' flag.
1658 rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
1659 ? MaxSector : dev_sectors;
1662 /* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
1663 static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
1666 /* New raid set or 'sync' flag provided */
1667 __rs_setup_recovery(rs, 0);
1668 else if (dev_sectors == MaxSector)
1669 /* Prevent recovery */
1670 __rs_setup_recovery(rs, MaxSector);
1671 else if (__rdev_sectors(rs) < dev_sectors)
1672 /* Grown raid set */
1673 __rs_setup_recovery(rs, __rdev_sectors(rs));
1675 __rs_setup_recovery(rs, MaxSector);
1678 static void do_table_event(struct work_struct *ws)
1680 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
1682 smp_rmb(); /* Make sure we access the most recent mddev properties */
1683 if (!rs_is_reshaping(rs)) {
1684 if (rs_is_raid10(rs))
1685 rs_set_rdev_sectors(rs);
1686 rs_set_capacity(rs);
1688 dm_table_event(rs->ti->table);
1691 static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
1693 struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
1695 return mddev_congested(&rs->md, bits);
1699 * Make sure a valid takeover (level switch) is being requested on @rs
1701 * Conversions of raid sets from one MD personality to another
1702 * have to conform to restrictions which are enforced here.
1704 static int rs_check_takeover(struct raid_set *rs)
1706 struct mddev *mddev = &rs->md;
1707 unsigned int near_copies;
1709 if (rs->md.degraded) {
1710 rs->ti->error = "Can't takeover degraded raid set";
1714 if (rs_is_reshaping(rs)) {
1715 rs->ti->error = "Can't takeover reshaping raid set";
1719 switch (mddev->level) {
1721 /* raid0 -> raid1/5 with one disk */
1722 if ((mddev->new_level == 1 || mddev->new_level == 5) &&
1723 mddev->raid_disks == 1)
1726 /* raid0 -> raid10 */
1727 if (mddev->new_level == 10 &&
1728 !(rs->raid_disks % mddev->raid_disks))
1731 /* raid0 with multiple disks -> raid4/5/6 */
1732 if (__within_range(mddev->new_level, 4, 6) &&
1733 mddev->new_layout == ALGORITHM_PARITY_N &&
1734 mddev->raid_disks > 1)
1740 /* Can't takeover raid10_offset! */
1741 if (__is_raid10_offset(mddev->layout))
1744 near_copies = __raid10_near_copies(mddev->layout);
1746 /* raid10* -> raid0 */
1747 if (mddev->new_level == 0) {
1748 /* Can takeover raid10_near with raid disks divisible by data copies! */
1749 if (near_copies > 1 &&
1750 !(mddev->raid_disks % near_copies)) {
1751 mddev->raid_disks /= near_copies;
1752 mddev->delta_disks = mddev->raid_disks;
1756 /* Can takeover raid10_far */
1757 if (near_copies == 1 &&
1758 __raid10_far_copies(mddev->layout) > 1)
1764 /* raid10_{near,far} -> raid1 */
1765 if (mddev->new_level == 1 &&
1766 max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
1769 /* raid10_{near,far} with 2 disks -> raid4/5 */
1770 if (__within_range(mddev->new_level, 4, 5) &&
1771 mddev->raid_disks == 2)
1776 /* raid1 with 2 disks -> raid4/5 */
1777 if (__within_range(mddev->new_level, 4, 5) &&
1778 mddev->raid_disks == 2) {
1779 mddev->degraded = 1;
1783 /* raid1 -> raid0 */
1784 if (mddev->new_level == 0 &&
1785 mddev->raid_disks == 1)
1788 /* raid1 -> raid10 */
1789 if (mddev->new_level == 10)
1794 /* raid4 -> raid0 */
1795 if (mddev->new_level == 0)
1798 /* raid4 -> raid1/5 with 2 disks */
1799 if ((mddev->new_level == 1 || mddev->new_level == 5) &&
1800 mddev->raid_disks == 2)
1803 /* raid4 -> raid5/6 with parity N */
1804 if (__within_range(mddev->new_level, 5, 6) &&
1805 mddev->layout == ALGORITHM_PARITY_N)
1810 /* raid5 with parity N -> raid0 */
1811 if (mddev->new_level == 0 &&
1812 mddev->layout == ALGORITHM_PARITY_N)
1815 /* raid5 with parity N -> raid4 */
1816 if (mddev->new_level == 4 &&
1817 mddev->layout == ALGORITHM_PARITY_N)
1820 /* raid5 with 2 disks -> raid1/4/10 */
1821 if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
1822 mddev->raid_disks == 2)
1825 /* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
1826 if (mddev->new_level == 6 &&
1827 ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
1828 __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
1833 /* raid6 with parity N -> raid0 */
1834 if (mddev->new_level == 0 &&
1835 mddev->layout == ALGORITHM_PARITY_N)
1838 /* raid6 with parity N -> raid4 */
1839 if (mddev->new_level == 4 &&
1840 mddev->layout == ALGORITHM_PARITY_N)
1843 /* raid6_*_n with Q-Syndrome N -> raid5_* */
1844 if (mddev->new_level == 5 &&
1845 ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
1846 __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
1853 rs->ti->error = "takeover not possible";
1857 /* True if @rs requested to be taken over */
1858 static bool rs_takeover_requested(struct raid_set *rs)
1860 return rs->md.new_level != rs->md.level;
1863 /* True if @rs is requested to reshape by ctr */
1864 static bool rs_reshape_requested(struct raid_set *rs)
1867 struct mddev *mddev = &rs->md;
1869 if (rs_takeover_requested(rs))
1875 change = mddev->new_layout != mddev->layout ||
1876 mddev->new_chunk_sectors != mddev->chunk_sectors ||
1879 /* Historical case to support raid1 reshape without delta disks */
1880 if (mddev->level == 1) {
1881 if (rs->delta_disks)
1882 return !!rs->delta_disks;
1885 mddev->raid_disks != rs->raid_disks;
1888 if (mddev->level == 10)
1890 !__is_raid10_far(mddev->new_layout) &&
1891 rs->delta_disks >= 0;
1897 #define FEATURE_FLAG_SUPPORTS_V190 0x1 /* Supports extended superblock */
1899 /* State flags for sb->flags */
1900 #define SB_FLAG_RESHAPE_ACTIVE 0x1
1901 #define SB_FLAG_RESHAPE_BACKWARDS 0x2
1904 * This structure is never routinely used by userspace, unlike md superblocks.
1905 * Devices with this superblock should only ever be accessed via device-mapper.
1907 #define DM_RAID_MAGIC 0x64526D44
1908 struct dm_raid_superblock {
1909 __le32 magic; /* "DmRd" */
1910 __le32 compat_features; /* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */
1912 __le32 num_devices; /* Number of devices in this raid set. (Max 64) */
1913 __le32 array_position; /* The position of this drive in the raid set */
1915 __le64 events; /* Incremented by md when superblock updated */
1916 __le64 failed_devices; /* Pre 1.9.0 part of bit field of devices to */
1917 /* indicate failures (see extension below) */
1920 * This offset tracks the progress of the repair or replacement of
1921 * an individual drive.
1923 __le64 disk_recovery_offset;
1926 * This offset tracks the progress of the initial raid set
1927 * synchronisation/parity calculation.
1929 __le64 array_resync_offset;
1932 * raid characteristics
1936 __le32 stripe_sectors;
1938 /********************************************************************
1939 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
1941 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
1944 __le32 flags; /* Flags defining array states for reshaping */
1947 * This offset tracks the progress of a raid
1948 * set reshape in order to be able to restart it
1950 __le64 reshape_position;
1953 * These define the properties of the array in case of an interrupted reshape
1957 __le32 new_stripe_sectors;
1960 __le64 array_sectors; /* Array size in sectors */
1963 * Sector offsets to data on devices (reshaping).
1964 * Needed to support out of place reshaping, thus
1965 * not writing over any stripes whilst converting
1966 * them from old to new layout
1969 __le64 new_data_offset;
1971 __le64 sectors; /* Used device size in sectors */
1974 * Additional bit field of devices indicating failures to support
1975 * up to 256 devices with the 1.9.0 on-disk metadata format
1977 __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];
1979 __le32 incompat_features; /* Used to indicate any incompatible features */
1981 /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */
1985 * Check for reshape constraints on raid set @rs:
1987 * - reshape function non-existent
1989 * - ongoing recovery
1992 * Returns 0 if none or -EPERM if a given constraint is violated,
1993 * with the error message set in rs->ti->error
1995 static int rs_check_reshape(struct raid_set *rs)
1997 struct mddev *mddev = &rs->md;
1999 if (!mddev->pers || !mddev->pers->check_reshape)
2000 rs->ti->error = "Reshape not supported";
2001 else if (mddev->degraded)
2002 rs->ti->error = "Can't reshape degraded raid set";
2003 else if (rs_is_recovering(rs))
2004 rs->ti->error = "Convert request on recovering raid set prohibited";
2005 else if (rs_is_reshaping(rs))
2006 rs->ti->error = "raid set already reshaping!";
2007 else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
2008 rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
2015 static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
2017 BUG_ON(!rdev->sb_page);
2019 if (rdev->sb_loaded && !force_reload)
2022 rdev->sb_loaded = 0;
2024 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
2025 DMERR("Failed to read superblock of device at position %d",
2027 md_error(rdev->mddev, rdev);
2028 set_bit(Faulty, &rdev->flags);
2032 rdev->sb_loaded = 1;
2037 static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
2039 failed_devices[0] = le64_to_cpu(sb->failed_devices);
2040 memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));
2042 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
2043 int i = ARRAY_SIZE(sb->extended_failed_devices);
2046 failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
2050 static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
2052 int i = ARRAY_SIZE(sb->extended_failed_devices);
2054 sb->failed_devices = cpu_to_le64(failed_devices[0]);
2056 sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
2060 * Synchronize the superblock members with the raid set properties
2062 * All superblock data is little endian.
2064 static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
2066 bool update_failed_devices = false;
2068 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
2069 struct dm_raid_superblock *sb;
2070 struct raid_set *rs = container_of(mddev, struct raid_set, md);
2072 /* No metadata device, no superblock */
2073 if (!rdev->meta_bdev)
2076 BUG_ON(!rdev->sb_page);
2078 sb = page_address(rdev->sb_page);
2080 sb_retrieve_failed_devices(sb, failed_devices);
2082 for (i = 0; i < rs->raid_disks; i++)
2083 if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
2084 update_failed_devices = true;
2085 set_bit(i, (void *) failed_devices);
2088 if (update_failed_devices)
2089 sb_update_failed_devices(sb, failed_devices);
2091 sb->magic = cpu_to_le32(DM_RAID_MAGIC);
2092 sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
2094 sb->num_devices = cpu_to_le32(mddev->raid_disks);
2095 sb->array_position = cpu_to_le32(rdev->raid_disk);
2097 sb->events = cpu_to_le64(mddev->events);
2099 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
2100 sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
2102 sb->level = cpu_to_le32(mddev->level);
2103 sb->layout = cpu_to_le32(mddev->layout);
2104 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
2106 /********************************************************************
2107 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
2109 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
2111 sb->new_level = cpu_to_le32(mddev->new_level);
2112 sb->new_layout = cpu_to_le32(mddev->new_layout);
2113 sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
2115 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2117 smp_rmb(); /* Make sure we access most recent reshape position */
2118 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2119 if (le64_to_cpu(sb->reshape_position) != MaxSector) {
2120 /* Flag ongoing reshape */
2121 sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);
2123 if (mddev->delta_disks < 0 || mddev->reshape_backwards)
2124 sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
2126 /* Clear reshape flags */
2127 sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
2130 sb->array_sectors = cpu_to_le64(mddev->array_sectors);
2131 sb->data_offset = cpu_to_le64(rdev->data_offset);
2132 sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
2133 sb->sectors = cpu_to_le64(rdev->sectors);
2134 sb->incompat_features = cpu_to_le32(0);
2136 /* Zero out the rest of the payload after the size of the superblock */
2137 memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
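/*
 * E.g. (illustrative sizes): with a metadata device using 4096-byte logical
 * blocks, rdev->sb_size is 4096 (set in analyse_superblocks() below), so the
 * memset() above clears everything between sizeof(*sb) and that boundary and
 * stale buffer contents never reach the on-disk superblock area.
 */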
2143 * This function creates a superblock if one is not found on the device
2144 * and will decide which superblock to use if there's a choice.
2146 * Return: 1 if rdev's superblock should be used, 0 if refdev's should be used, -Exxx on error
2148 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
2151 struct dm_raid_superblock *sb;
2152 struct dm_raid_superblock *refsb;
2153 uint64_t events_sb, events_refsb;
2155 r = read_disk_sb(rdev, rdev->sb_size, false);
2159 sb = page_address(rdev->sb_page);
2162 * Two cases in which we want to write new superblocks and rebuild:
2163 * 1) New device (no matching magic number)
2164 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
2166 if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
2167 (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
2168 super_sync(rdev->mddev, rdev);
2170 set_bit(FirstUse, &rdev->flags);
2171 sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
2173 /* Force writing of superblocks to disk */
2174 set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
2176 /* Any superblock is better than none, choose that if given */
2177 return refdev ? 0 : 1;
2183 events_sb = le64_to_cpu(sb->events);
2185 refsb = page_address(refdev->sb_page);
2186 events_refsb = le64_to_cpu(refsb->events);
2188 return (events_sb > events_refsb) ? 1 : 0;
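/*
 * Worked example (hypothetical values): given candidate superblocks carrying
 * events counts of 7 (rdev) and 5 (refdev), super_load() returns 1, so the
 * device with events == 7 becomes the reference for validating the others.
 */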
2191 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2195 struct mddev *mddev = &rs->md;
2197 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
2198 struct dm_raid_superblock *sb;
2199 uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
2201 struct dm_raid_superblock *sb2;
2203 sb = page_address(rdev->sb_page);
2204 events_sb = le64_to_cpu(sb->events);
2207 * Initialise to 1 if this is a new superblock.
2209 mddev->events = events_sb ? : 1;
2211 mddev->reshape_position = MaxSector;
2213 mddev->raid_disks = le32_to_cpu(sb->num_devices);
2214 mddev->level = le32_to_cpu(sb->level);
2215 mddev->layout = le32_to_cpu(sb->layout);
2216 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
2219 * Reshaping is supported, i.e. reshape_position is valid
2220 * in superblock and superblock content is authoritative.
2222 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
2223 /* Superblock is authoritative wrt given raid set layout! */
2224 mddev->new_level = le32_to_cpu(sb->new_level);
2225 mddev->new_layout = le32_to_cpu(sb->new_layout);
2226 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
2227 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
2228 mddev->array_sectors = le64_to_cpu(sb->array_sectors);
2230 /* raid was reshaping and got interrupted */
2231 if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
2232 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
2233 DMERR("Reshape requested but raid set is still reshaping");
2237 if (mddev->delta_disks < 0 ||
2238 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
2239 mddev->reshape_backwards = 1;
2241 mddev->reshape_backwards = 0;
2243 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
2244 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
2249 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
2251 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
2252 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
2254 if (rs_takeover_requested(rs)) {
2255 if (rt_cur && rt_new)
2256 DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
2257 rt_cur->name, rt_new->name);
2259 DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
2261 } else if (rs_reshape_requested(rs)) {
2262 DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
2263 if (mddev->layout != mddev->new_layout) {
2264 if (rt_cur && rt_new)
2265 DMERR(" current layout %s vs new layout %s",
2266 rt_cur->name, rt_new->name);
2268 DMERR(" current layout 0x%X vs new layout 0x%X",
2269 le32_to_cpu(sb->layout), mddev->new_layout);
2271 if (mddev->chunk_sectors != mddev->new_chunk_sectors)
2272 DMERR(" current stripe sectors %u vs new stripe sectors %u",
2273 mddev->chunk_sectors, mddev->new_chunk_sectors);
2274 if (rs->delta_disks)
2275 DMERR(" current %u disks vs new %u disks",
2276 mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
2277 if (rs_is_raid10(rs)) {
2278 DMERR(" Old layout: %s w/ %u copies",
2279 raid10_md_layout_to_format(mddev->layout),
2280 raid10_md_layout_to_copies(mddev->layout));
2281 DMERR(" New layout: %s w/ %u copies",
2282 raid10_md_layout_to_format(mddev->new_layout),
2283 raid10_md_layout_to_copies(mddev->new_layout));
2288 DMINFO("Discovered old metadata format; upgrading to extended metadata format");
2291 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
2292 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
2295 * During load, we set FirstUse if a new superblock was written.
2296 * There are two reasons we might not have a superblock:
2297 * 1) The raid set is brand new - in which case, all of the
2298 * devices must have their In_sync bit set. Also,
2299 * recovery_cp must be 0, unless forced.
2300 * 2) This is a new device being added to an old raid set
2301 * and the new device needs to be rebuilt - in which
2302 * case the In_sync bit will /not/ be set and
2303 * recovery_cp must be MaxSector.
2304 * 3) This is/are a new device(s) being added to an old
2305 * raid set during takeover to a higher raid level
2306 * to provide capacity for redundancy or during reshape
2307 * to add capacity to grow the raid set.
2310 rdev_for_each(r, mddev) {
2311 if (test_bit(Journal, &r->flags))
2314 if (test_bit(FirstUse, &r->flags))
2317 if (!test_bit(In_sync, &r->flags)) {
2318 DMINFO("Device %d specified for rebuild; clearing superblock",
2322 if (test_bit(FirstUse, &r->flags))
2329 if (new_devs == rs->raid_disks || !rebuilds) {
2330 /* Replace a broken device */
2331 if (new_devs == 1 && !rs->delta_disks)
2333 if (new_devs == rs->raid_disks) {
2334 DMINFO("Superblocks created for new raid set");
2335 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2336 } else if (new_devs != rebuilds &&
2337 new_devs != rs->delta_disks) {
2338 DMERR("New device injected into existing raid set without "
2339 "'delta_disks' or 'rebuild' parameter specified");
2342 } else if (new_devs && new_devs != rebuilds) {
2343 DMERR("%u 'rebuild' devices cannot be injected into"
2344 " a raid set with %u other first-time devices",
2345 rebuilds, new_devs);
2347 } else if (rebuilds) {
2348 if (rebuild_and_new && rebuilds != rebuild_and_new) {
2349 DMERR("new device%s provided without 'rebuild'",
2350 new_devs > 1 ? "s" : "");
2352 } else if (rs_is_recovering(rs)) {
2353 DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
2354 (unsigned long long) mddev->recovery_cp);
2356 } else if (rs_is_reshaping(rs)) {
2357 DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
2358 (unsigned long long) mddev->reshape_position);
2364 * Now we set the Faulty bit for those devices that are
2365 * recorded in the superblock as failed.
2367 sb_retrieve_failed_devices(sb, failed_devices);
2368 rdev_for_each(r, mddev) {
2369 if (test_bit(Journal, &r->flags) ||
2372 sb2 = page_address(r->sb_page);
2373 sb2->failed_devices = 0;
2374 memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));
2377 * Check for any device re-ordering.
2379 if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
2380 role = le32_to_cpu(sb2->array_position);
2384 if (role != r->raid_disk) {
2385 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
2386 if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
2387 rs->raid_disks % rs->raid10_copies) {
2389 "Cannot change raid10 near set to odd # of devices!";
2393 sb2->array_position = cpu_to_le32(r->raid_disk);
2395 } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
2396 !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
2397 !rt_is_raid1(rs->raid_type)) {
2398 rs->ti->error = "Cannot change device positions in raid set";
2402 DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
2406 * Partial recovery is performed on
2407 * failed devices that have returned.
2409 if (test_bit(role, (void *) failed_devices))
2410 set_bit(Faulty, &r->flags);
2417 static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2419 struct mddev *mddev = &rs->md;
2420 struct dm_raid_superblock *sb;
2422 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
2425 sb = page_address(rdev->sb_page);
2428 * If mddev->events is not set, we know we have not yet initialized
2429 * the array.
2431 if (!mddev->events && super_init_validation(rs, rdev))
2434 if (le32_to_cpu(sb->compat_features) &&
2435 le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
2436 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
2440 if (sb->incompat_features) {
2441 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
2445 /* Enable bitmap creation for RAID levels != 0 */
2446 mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
2447 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
2449 if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
2451 * Retrieve rdev size stored in superblock to be prepared for shrink.
2452 * Check that the extended superblock members are present;
2453 * otherwise the size will not be set!
2455 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
2456 rdev->sectors = le64_to_cpu(sb->sectors);
2458 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
2459 if (rdev->recovery_offset == MaxSector)
2460 set_bit(In_sync, &rdev->flags);
2462 * If no reshape in progress -> we're recovering single
2463 * disk(s) and have to set the device(s) to out-of-sync
2465 else if (!rs_is_reshaping(rs))
2466 clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
2470 * If a device comes back, set it as not In_sync and no longer faulty.
2472 if (test_and_clear_bit(Faulty, &rdev->flags)) {
2473 rdev->recovery_offset = 0;
2474 clear_bit(In_sync, &rdev->flags);
2475 rdev->saved_raid_disk = rdev->raid_disk;
2478 /* Reshape support -> restore respective data offsets */
2479 rdev->data_offset = le64_to_cpu(sb->data_offset);
2480 rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
2486 * Analyse superblocks and select the freshest.
2488 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2491 struct md_rdev *rdev, *freshest;
2492 struct mddev *mddev = &rs->md;
2495 rdev_for_each(rdev, mddev) {
2496 if (test_bit(Journal, &rdev->flags))
2499 if (!rdev->meta_bdev)
2502 /* Set superblock offset/size for metadata device. */
2504 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
2505 if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
2506 DMERR("superblock size of a logical block is no longer valid");
2511 * Skipping super_load due to CTR_FLAG_SYNC will cause
2512 * the array to undergo initialization again as
2513 * though it were new. This is the intended effect
2514 * of the "sync" directive.
2516 * With reshaping capability added, we must ensure that
2517 * the "sync" directive is disallowed during the reshape.
2519 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
2522 r = super_load(rdev, freshest);
2531 /* This is a failure to read the superblock from the metadata device. */
2533 * We have to keep any raid0 data/metadata device pairs or
2534 * the MD raid0 personality will fail to start the array.
2536 if (rs_is_raid0(rs))
2540 * We keep the dm_devs to be able to emit the device tuple
2541 * properly on the table line in raid_status() (rather than
2542 * mistakenly acting as if '- -' got passed into the constructor).
2544 * The rdev has to stay on the same_set list to allow for
2545 * the attempt to restore faulty devices on second resume.
2547 rdev->raid_disk = rdev->saved_raid_disk = -1;
2556 * Validation of the freshest device provides the source of
2557 * validation for the remaining devices.
2559 rs->ti->error = "Unable to assemble array: Invalid superblocks";
2560 if (super_validate(rs, freshest))
2563 if (validate_raid_redundancy(rs)) {
2564 rs->ti->error = "Insufficient redundancy to activate array";
2568 rdev_for_each(rdev, mddev)
2569 if (!test_bit(Journal, &rdev->flags) &&
2571 super_validate(rs, rdev))
2577 * Adjust data_offset and new_data_offset on all disk members of @rs
2578 * for out of place reshaping if requested by the constructor.
2580 * We need free space at the beginning of each raid disk for forward
2581 * and at the end for backward reshapes which userspace has to provide
2582 * via remapping/reordering of space.
2584 static int rs_adjust_data_offsets(struct raid_set *rs)
2586 sector_t data_offset = 0, new_data_offset = 0;
2587 struct md_rdev *rdev;
2589 /* Constructor did not request data offset change */
2590 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
2591 if (!rs_is_reshapable(rs))
2597 /* HM FIXME: get InSync raid_dev? */
2598 rdev = &rs->dev[0].rdev;
2600 if (rs->delta_disks < 0) {
2602 * Removing disks (reshaping backwards):
2604 * - before reshape: data is at offset 0 and free space
2605 * is at the end of each component LV
2607 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
2610 new_data_offset = rs->data_offset;
2612 } else if (rs->delta_disks > 0) {
2614 * Adding disks (reshaping forwards):
2616 * - before reshape: data is at offset rs->data_offset != 0 and
2617 * free space is at the beginning of each component LV
2619 * - after reshape: data is at offset 0 on each component LV
2621 data_offset = rs->data_offset;
2622 new_data_offset = 0;
2626 * User space passes in 0 for data offset after having removed reshape space
2628 * - or - (data offset != 0)
2630 * Changing RAID layout or chunk size -> toggle offsets
2632 * - before reshape: data is at offset rs->data_offset == 0 and
2633 * free space is at the end of each component LV
2634 * -or-
2635 * data is at offset rs->data_offset != 0 and
2636 * free space is at the beginning of each component LV
2638 * - after reshape: data is at offset 0 if it was at offset != 0
2639 * or at offset != 0 if it was at offset 0
2640 * on each component LV
2643 data_offset = rs->data_offset ? rdev->data_offset : 0;
2644 new_data_offset = data_offset ? 0 : rs->data_offset;
2645 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
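/*
 * Worked example (hypothetical sectors): with rs->data_offset == 2048 and
 * data currently at rdev->data_offset == 0, the toggle above yields
 * data_offset = 0 and new_data_offset = 2048, i.e. the data is reshaped
 * into the 2048 free sectors; a later layout change toggles it back to 0.
 */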
2649 * Make sure there is a minimum amount of free sectors per device
2651 if (rs->data_offset &&
2652 to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
2653 rs->ti->error = data_offset ? "No space for forward reshape" :
2654 "No space for backward reshape";
2658 /* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
2659 rdev_for_each(rdev, &rs->md) {
2660 if (!test_bit(Journal, &rdev->flags)) {
2661 rdev->data_offset = data_offset;
2662 rdev->new_data_offset = new_data_offset;
2669 /* Userspace reordered disks -> adjust raid_disk indexes in @rs */
2670 static void __reorder_raid_disk_indexes(struct raid_set *rs)
2673 struct md_rdev *rdev;
2675 rdev_for_each(rdev, &rs->md) {
2676 if (!test_bit(Journal, &rdev->flags)) {
2677 rdev->raid_disk = i++;
2678 rdev->saved_raid_disk = rdev->new_raid_disk = -1;
2684 * Setup @rs for takeover by a different raid level
2686 static int rs_setup_takeover(struct raid_set *rs)
2688 struct mddev *mddev = &rs->md;
2689 struct md_rdev *rdev;
2690 unsigned int d = mddev->raid_disks = rs->raid_disks;
2691 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
2693 if (rt_is_raid10(rs->raid_type)) {
2694 if (mddev->level == 0) {
2695 /* Userspace reordered disks -> adjust raid_disk indexes */
2696 __reorder_raid_disk_indexes(rs);
2698 /* raid0 -> raid10_far layout */
2699 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
2701 } else if (mddev->level == 1)
2702 /* raid1 -> raid10_near layout */
2703 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2710 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2711 mddev->recovery_cp = MaxSector;
2714 rdev = &rs->dev[d].rdev;
2716 if (test_bit(d, (void *) rs->rebuild_disks)) {
2717 clear_bit(In_sync, &rdev->flags);
2718 clear_bit(Faulty, &rdev->flags);
2719 mddev->recovery_cp = rdev->recovery_offset = 0;
2720 /* Bitmap has to be created when we do an "up" takeover */
2721 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2724 rdev->new_data_offset = new_data_offset;
2730 /* Prepare @rs for reshape */
2731 static int rs_prepare_reshape(struct raid_set *rs)
2734 struct mddev *mddev = &rs->md;
2736 if (rs_is_raid10(rs)) {
2737 if (rs->raid_disks != mddev->raid_disks &&
2738 __is_raid10_near(mddev->layout) &&
2739 rs->raid10_copies &&
2740 rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
2742 * raid disks have to be a multiple of the number of data copies to allow this conversion.
2744 * This is actually not a reshape; it is a
2745 * rebuild of any additional mirrors per group.
2747 if (rs->raid_disks % rs->raid10_copies) {
2748 rs->ti->error = "Can't reshape raid10 mirror groups";
2752 /* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
2753 __reorder_raid_disk_indexes(rs);
2754 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2756 mddev->new_layout = mddev->layout;
2761 } else if (rs_is_raid456(rs))
2764 else if (rs_is_raid1(rs)) {
2765 if (rs->delta_disks) {
2766 /* Process raid1 via delta_disks */
2767 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
2770 /* Process raid1 without delta_disks */
2771 mddev->raid_disks = rs->raid_disks;
2775 rs->ti->error = "Called with bogus raid type";
2780 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
2781 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2782 } else if (mddev->raid_disks < rs->raid_disks)
2783 /* Create new superblocks and bitmaps, if any new disks */
2784 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2791 * - change raid layout
2792 * - change chunk size
2796 static int rs_setup_reshape(struct raid_set *rs)
2799 unsigned int cur_raid_devs, d;
2800 struct mddev *mddev = &rs->md;
2801 struct md_rdev *rdev;
2803 mddev->delta_disks = rs->delta_disks;
2804 cur_raid_devs = mddev->raid_disks;
2806 /* Ignore impossible layout change whilst adding/removing disks */
2807 if (mddev->delta_disks &&
2808 mddev->layout != mddev->new_layout) {
2809 DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
2810 mddev->new_layout = mddev->layout;
2814 * Adjust array size:
2816 * - in case of adding disks, array size has
2817 * to grow after the disk adding reshape,
2818 * which'll happen in the event handler;
2819 * reshape will happen forward, so space has to
2820 * be available at the beginning of each disk
2822 * - in case of removing disks, array size
2823 * has to shrink before starting the reshape,
2824 * which'll happen here;
2825 * reshape will happen backward, so space has to
2826 * be available at the end of each disk
2828 * - data_offset and new_data_offset are
2829 * adjusted for aforementioned out of place
2830 * reshaping based on userspace passing in
2831 * the "data_offset <sectors>" key/value
2832 * pair via the constructor
2836 if (rs->delta_disks > 0) {
2837 /* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
2838 for (d = cur_raid_devs; d < rs->raid_disks; d++) {
2839 rdev = &rs->dev[d].rdev;
2840 clear_bit(In_sync, &rdev->flags);
2843 * saved_raid_disk needs to be -1, or recovery_offset will be set to 0
2844 * by md, which'll store that erroneously in the superblock on reshape
2846 rdev->saved_raid_disk = -1;
2847 rdev->raid_disk = d;
2849 rdev->sectors = mddev->dev_sectors;
2850 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
2853 mddev->reshape_backwards = 0; /* adding disks -> forward reshape */
2855 /* Remove disk(s) */
2856 } else if (rs->delta_disks < 0) {
2857 r = rs_set_dev_and_array_sectors(rs, true);
2858 mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
2860 /* Change layout and/or chunk size */
2863 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
2865 * keeping the number of disks while changing layout and/or chunk size ->
2867 * toggle reshape_backward depending on data_offset:
2869 * - free space upfront -> reshape forward
2871 * - free space at the end -> reshape backward
2874 * This utilizes free reshape space avoiding the need
2875 * for userspace to move (parts of) LV segments in
2876 * case of layout/chunksize change (for disk
2877 * adding/removing, reshape space has to be at
2878 * the proper address; see above with delta_disks):
2880 * add disk(s) -> begin
2881 * remove disk(s)-> end
2883 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
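/*
 * E.g. (illustrative): if userspace supplied out-of-place space at the start
 * of each image, rdev->data_offset is non-zero and the layout/chunksize
 * reshape runs forward into it; with data still at offset 0 the free space
 * can only be at the end, so the reshape has to run backward.
 */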
2890 * Enable/disable discard support on RAID set depending on
2891 * RAID level and discard properties of underlying RAID members.
2893 static void configure_discard_support(struct raid_set *rs)
2897 struct dm_target *ti = rs->ti;
2899 /* Assume discards not supported until after checks below. */
2900 ti->discards_supported = false;
2903 * XXX: RAID level 4,5,6 require zeroing for safety.
2905 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
2907 for (i = 0; i < rs->raid_disks; i++) {
2908 struct request_queue *q;
2910 if (!rs->dev[i].rdev.bdev)
2913 q = bdev_get_queue(rs->dev[i].rdev.bdev);
2914 if (!q || !blk_queue_discard(q))
2918 if (!devices_handle_discard_safely) {
2919 DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
2920 DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
2926 /* All RAID members properly support discards */
2927 ti->discards_supported = true;
2930 * RAID1 and RAID10 personalities require bio splitting,
2931 * RAID0/4/5/6 don't and process large discard bios properly.
2933 ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
2934 ti->num_discard_bios = 1;
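/*
 * Usage note (illustrative): the override mentioned above is typically set
 * at module load time, e.g.
 *
 *	modprobe dm-raid devices_handle_discard_safely=Y
 *
 * or at runtime via the corresponding sysfs module parameter file.
 */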
2938 * Construct a RAID0/1/10/4/5/6 mapping:
2940 * <raid_type> <#raid_params> <raid_params>{0,} \
2941 * <#raid_devs> [<meta_dev1> <dev1>]{1,}
2943 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
2944 * details on possible <raid_params>.
2946 * Userspace is free to initialize the metadata devices (and hence the superblocks) to
2947 * enforce recreation based on the passed-in table parameters.
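*
* Example table line (adapted from Documentation/device-mapper/dm-raid.txt;
* device numbers are illustrative):
*
* 0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
*
* i.e. a raid4 set with a single raid parameter (a 2048-sector chunk size)
* across five devices, none of which has a metadata device ('-').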
2950 static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2954 struct raid_type *rt;
2955 unsigned int num_raid_params, num_raid_devs;
2956 sector_t calculated_dev_sectors, rdev_sectors;
2957 struct raid_set *rs = NULL;
2959 struct rs_layout rs_layout;
2960 struct dm_arg_set as = { argc, argv }, as_nrd;
2961 struct dm_arg _args[] = {
2962 { 0, as.argc, "Cannot understand number of raid parameters" },
2963 { 1, 254, "Cannot understand number of raid devices parameters" }
2966 /* Must have <raid_type> */
2967 arg = dm_shift_arg(&as);
2969 ti->error = "No arguments";
2973 rt = get_raid_type(arg);
2975 ti->error = "Unrecognised raid_type";
2979 /* Must have <#raid_params> */
2980 if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
2983 /* number of raid device tuples <meta_dev data_dev> */
2985 dm_consume_args(&as_nrd, num_raid_params);
2986 _args[1].max = (as_nrd.argc - 1) / 2;
2987 if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
2990 if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
2991 ti->error = "Invalid number of supplied raid devices";
2995 rs = raid_set_alloc(ti, rt, num_raid_devs);
2999 r = parse_raid_params(rs, &as, num_raid_params);
3003 r = parse_dev_params(rs, &as);
3007 rs->md.sync_super = super_sync;
3010 * Calculate ctr requested array and device sizes to allow
3011 * for superblock analysis needing device sizes defined.
3013 * Any existing superblock will overwrite the array and device sizes
3015 r = rs_set_dev_and_array_sectors(rs, false);
3019 calculated_dev_sectors = rs->md.dev_sectors;
3022 * Backup any new raid set level, layout, ...
3023 * requested to be able to compare to superblock
3024 * members for conversion decisions.
3026 rs_config_backup(rs, &rs_layout);
3028 r = analyse_superblocks(ti, rs);
3032 rdev_sectors = __rdev_sectors(rs);
3033 if (!rdev_sectors) {
3034 ti->error = "Invalid rdev size";
3039 resize = calculated_dev_sectors != rdev_sectors;
3041 INIT_WORK(&rs->md.event_work, do_table_event);
3043 ti->num_flush_bios = 1;
3045 /* Restore any requested new layout for conversion decision */
3046 rs_config_restore(rs, &rs_layout);
3049 * Now that we have any superblock metadata available,
3050 * check for new, recovering, reshaping, to be taken over,
3051 * to be reshaped or an existing, unchanged raid set to
3052 * validate.
3054 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
3055 /* A new raid6 set has to be recovered to ensure proper parity and Q-Syndrome */
3056 if (rs_is_raid6(rs) &&
3057 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
3058 ti->error = "'nosync' not allowed for new raid6 set";
3062 rs_setup_recovery(rs, 0);
3063 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3065 } else if (rs_is_recovering(rs)) {
3066 /* Rebuild particular devices */
3067 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3068 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3069 rs_setup_recovery(rs, MaxSector);
3071 /* A recovering raid set may be resized */
3072 ; /* skip setup rs */
3073 } else if (rs_is_reshaping(rs)) {
3074 /* Have to reject size change request during reshape */
3076 ti->error = "Can't resize a reshaping raid set";
3081 } else if (rs_takeover_requested(rs)) {
3082 if (rs_is_reshaping(rs)) {
3083 ti->error = "Can't takeover a reshaping raid set";
3088 /* We can't takeover a journaled raid4/5/6 */
3089 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3090 ti->error = "Can't takeover a journaled raid4/5/6 set";
3096 * If a takeover is needed, userspace sets any additional
3097 * devices to rebuild and we can check for a valid request here.
3099 * If acceptable, set the level to the new requested
3100 * one, prohibit requesting recovery, allow the raid
3101 * set to run and store superblocks during resume.
3103 r = rs_check_takeover(rs);
3107 r = rs_setup_takeover(rs);
3111 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3112 /* Takeover ain't recovery, so disable recovery */
3113 rs_setup_recovery(rs, MaxSector);
3115 } else if (rs_reshape_requested(rs)) {
3117 * No need to check for 'ongoing' takeover here, because takeover
3118 * is an instant operation as opposed to an ongoing reshape.
3121 /* We can't reshape a journaled raid4/5/6 */
3122 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3123 ti->error = "Can't reshape a journaled raid4/5/6 set";
3129 * We can only prepare for a reshape here, because the
3130 * raid set needs to run to provide the respective reshape
3131 * check functions via its MD personality instance.
3133 * So do the reshape check after md_run() succeeded.
3135 r = rs_prepare_reshape(rs);
3139 /* Reshaping ain't recovery, so disable recovery */
3140 rs_setup_recovery(rs, MaxSector);
3143 /* May not set recovery when a device rebuild is requested */
3144 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3145 rs_setup_recovery(rs, MaxSector);
3146 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3148 rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
3149 0 : (resize ? calculated_dev_sectors : MaxSector));
3153 /* If constructor requested it, change data and new_data offsets */
3154 r = rs_adjust_data_offsets(rs);
3158 /* Start raid set read-only and assumed clean to change in raid_resume() */
3161 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
3163 /* Has to be held on running the array */
3164 mddev_lock_nointr(&rs->md);
3165 r = md_run(&rs->md);
3166 rs->md.in_sync = 0; /* Assume already marked dirty */
3169 ti->error = "Failed to run raid array";
3170 mddev_unlock(&rs->md);
3174 rs->callbacks.congested_fn = raid_is_congested;
3175 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
3177 /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
3178 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
3179 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
3181 ti->error = "Failed to set raid4/5/6 journal mode";
3182 mddev_unlock(&rs->md);
3183 goto bad_journal_mode_set;
3187 mddev_suspend(&rs->md);
3188 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3190 /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
3191 if (rs_is_raid456(rs)) {
3192 r = rs_set_raid456_stripe_cache(rs);
3194 goto bad_stripe_cache;
3197 /* Now do an early reshape check */
3198 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
3199 r = rs_check_reshape(rs);
3201 goto bad_check_reshape;
3203 /* Restore new, ctr requested layout to perform check */
3204 rs_config_restore(rs, &rs_layout);
3206 if (rs->md.pers->start_reshape) {
3207 r = rs->md.pers->check_reshape(&rs->md);
3209 ti->error = "Reshape check failed";
3210 goto bad_check_reshape;
3215 /* Disable/enable discard support on raid set. */
3216 configure_discard_support(rs);
3218 mddev_unlock(&rs->md);
3221 bad_journal_mode_set:
3231 static void raid_dtr(struct dm_target *ti)
3233 struct raid_set *rs = ti->private;
3235 list_del_init(&rs->callbacks.list);
3240 static int raid_map(struct dm_target *ti, struct bio *bio)
3242 struct raid_set *rs = ti->private;
3243 struct mddev *mddev = &rs->md;
3246 * If we're reshaping to add disk(s), ti->len and
3247 * mddev->array_sectors will differ during the process
3248 * (ti->len > mddev->array_sectors), so we have to requeue
3249 * bios with addresses > mddev->array_sectors here or
3250 * there will occur accesses past EOD of the component
3251 * data images thus erroring the raid set.
3253 if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
3254 return DM_MAPIO_REQUEUE;
3256 md_handle_request(mddev, bio);
3258 return DM_MAPIO_SUBMITTED;
3261 /* Return string describing the current sync action of @mddev */
3262 static const char *decipher_sync_action(struct mddev *mddev)
3264 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3267 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3268 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3269 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3272 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3273 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3275 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3280 if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3288 * Return status string for @rdev
3290 * Status characters:
3292 * 'D' = Dead/Failed raid set component or raid4/5/6 journal device
3293 * 'a' = Alive but not in-sync raid set component _or_ alive raid4/5/6 'write_back' journal device
3294 * 'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
3295 * '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
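*
* E.g. (illustrative): a fully synchronized five-device set reports "AAAAA"
* via raid_status() below, whereas "AAaAA" would flag the third device as
* alive but still catching up.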
3297 static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev, bool array_in_sync)
3301 else if (test_bit(Faulty, &rdev->flags))
3303 else if (test_bit(Journal, &rdev->flags))
3304 return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
3305 else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
3311 /* Helper to return resync/reshape progress for @rs and @array_in_sync */
3312 static sector_t rs_get_progress(struct raid_set *rs,
3313 sector_t resync_max_sectors, bool *array_in_sync)
3315 sector_t r, curr_resync_completed;
3316 struct mddev *mddev = &rs->md;
3318 curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
3319 *array_in_sync = false;
3321 if (rs_is_raid0(rs)) {
3322 r = resync_max_sectors;
3323 *array_in_sync = true;
3326 r = mddev->reshape_position;
3328 /* Reshape is relative to the array size */
3329 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
3331 if (r == MaxSector) {
3332 *array_in_sync = true;
3333 r = resync_max_sectors;
3335 /* Got to reverse on backward reshape */
3336 if (mddev->reshape_backwards)
3337 r = mddev->array_sectors - r;
3339 /* Divide by # of data stripes */
3340 sector_div(r, mddev_data_stripes(rs));
3343 /* Sync is relative to the component device size */
3344 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3345 r = curr_resync_completed;
3347 r = mddev->recovery_cp;
3349 if ((r == MaxSector) ||
3350 (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
3351 (mddev->curr_resync_completed == resync_max_sectors))) {
3355 *array_in_sync = true;
3356 r = resync_max_sectors;
3357 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
3359 * If "check" or "repair" is occurring, the raid set has
3360 * undergone an initial sync and the health characters
3361 * should not be 'a' anymore.
3363 *array_in_sync = true;
3365 struct md_rdev *rdev;
3368 * The raid set may be doing an initial sync, or it may
3369 * be rebuilding individual components. If all the
3370 * devices are In_sync, then it is the raid set that is
3371 * being initialized.
3373 rdev_for_each(rdev, mddev)
3374 if (!test_bit(Journal, &rdev->flags) &&
3375 !test_bit(In_sync, &rdev->flags))
3376 *array_in_sync = true;
3378 r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
3386 /* Helper to return @dev name or "-" if !@dev */
3387 static const char *__get_dev_name(struct dm_dev *dev)
3389 return dev ? dev->name : "-";
3392 static void raid_status(struct dm_target *ti, status_type_t type,
3393 unsigned int status_flags, char *result, unsigned int maxlen)
3395 struct raid_set *rs = ti->private;
3396 struct mddev *mddev = &rs->md;
3397 struct r5conf *conf = mddev->private;
3398 int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
3400 unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
3401 unsigned int sz = 0;
3402 unsigned int rebuild_disks;
3403 unsigned int write_mostly_params = 0;
3404 sector_t progress, resync_max_sectors, resync_mismatches;
3405 const char *sync_action;
3406 struct raid_type *rt;
3409 case STATUSTYPE_INFO:
3410 /* *Should* always succeed */
3411 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3415 DMEMIT("%s %d ", rt->name, mddev->raid_disks);
3417 /* Access most recent mddev properties for status output */
3419 /* Get sensible max sectors even if raid set not yet started */
3420 resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
3421 mddev->resync_max_sectors : mddev->dev_sectors;
3422 progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
3423 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3424 atomic64_read(&mddev->resync_mismatches) : 0;
3425 sync_action = decipher_sync_action(&rs->md);
3427 /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
3428 for (i = 0; i < rs->raid_disks; i++)
3429 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev, array_in_sync));
3432 * In-sync/Reshape ratio:
3433 * The in-sync ratio shows the progress of:
3434 * - Initializing the raid set
3435 * - Rebuilding a subset of devices of the raid set
3436 * The user can distinguish between the two by referring
3437 * to the status characters.
3439 * The reshape ratio shows the progress of
3440 * changing the raid layout or the number of
3441 * disks of a raid set
3443 DMEMIT(" %llu/%llu", (unsigned long long) progress,
3444 (unsigned long long) resync_max_sectors);
3450 * See Documentation/device-mapper/dm-raid.txt for
3451 * information on each of these states.
3453 DMEMIT(" %s", sync_action);
3458 * resync_mismatches/mismatch_cnt
3459 * This field shows the number of discrepancies found when
3460 * performing a "check" of the raid set.
3462 DMEMIT(" %llu", (unsigned long long) resync_mismatches);
3467 * data_offset (needed for out of place reshaping)
3468 * This field shows the data offset into the data
3469 * image LV where the first stripe's data starts.
3471 * We keep data_offset equal on all raid disks of the set,
3472 * so retrieving it from the first raid disk is sufficient.
3474 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
3479 DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
3480 __raid_dev_status(rs, &rs->journal_dev.rdev, 0) : "-");
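/*
 * An assembled STATUSTYPE_INFO line then reads e.g. (hypothetical values,
 * cf. Documentation/device-mapper/dm-raid.txt):
 *
 *	raid4 5 AAAAA 490221568/490221568 idle 0 0 -
 */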
3483 case STATUSTYPE_TABLE:
3484 /* Report the table line string you would use to construct this raid set */
3486 /* Calculate raid parameter count */
3487 for (i = 0; i < rs->raid_disks; i++)
3488 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3489 write_mostly_params += 2;
3490 rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
3491 raid_param_cnt += rebuild_disks * 2 +
3492 write_mostly_params +
3493 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
3494 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2 +
3495 (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? 2 : 0) +
3496 (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags) ? 2 : 0);
3498 /* Emit table line */
3499 /* This has to be in the documented order for userspace! */
3500 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
3501 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
3502 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
3503 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
3504 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
3506 for (i = 0; i < rs->raid_disks; i++)
3507 if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
3508 DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
3509 rs->dev[i].rdev.raid_disk);
3510 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
3511 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
3512 mddev->bitmap_info.daemon_sleep);
3513 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
3514 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
3515 mddev->sync_speed_min);
3516 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
3517 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
3518 mddev->sync_speed_max);
3519 if (write_mostly_params)
3520 for (i = 0; i < rs->raid_disks; i++)
3521 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3522 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
3523 rs->dev[i].rdev.raid_disk);
3524 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
3525 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
3526 mddev->bitmap_info.max_write_behind);
3527 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
3528 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
3530 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
3531 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
3532 (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
3533 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
3534 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
3535 raid10_md_layout_to_copies(mddev->layout));
3536 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
3537 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
3538 raid10_md_layout_to_format(mddev->layout));
3539 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
3540 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
3541 max(rs->delta_disks, mddev->delta_disks));
3542 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
3543 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
3544 (unsigned long long) rs->data_offset);
3545 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags))
3546 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV),
3547 __get_dev_name(rs->journal_dev.dev));
3548 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags))
3549 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE),
3550 md_journal_mode_to_dm_raid(rs->journal_dev.mode));
3551 DMEMIT(" %d", rs->raid_disks);
3552 for (i = 0; i < rs->raid_disks; i++)
3553 DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
3554 __get_dev_name(rs->dev[i].data_dev));
3558 static int raid_message(struct dm_target *ti, unsigned int argc, char **argv)
3560 struct raid_set *rs = ti->private;
3561 struct mddev *mddev = &rs->md;
3563 if (!mddev->pers || !mddev->pers->sync_request)
3566 if (!strcasecmp(argv[0], "frozen"))
3567 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3569 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3571 if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
3572 if (mddev->sync_thread) {
3573 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3574 md_reap_sync_thread(mddev);
3576 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3577 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3579 else if (!strcasecmp(argv[0], "resync"))
3580 ; /* MD_RECOVERY_NEEDED set below */
3581 else if (!strcasecmp(argv[0], "recover"))
3582 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3584 if (!strcasecmp(argv[0], "check")) {
3585 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3586 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3587 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3588 } else if (!strcasecmp(argv[0], "repair")) {
3589 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3590 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3594 if (mddev->ro == 2) {
3595 /* A write to sync_action is enough to justify
3596 * canceling read-auto mode
3599 if (!mddev->suspended && mddev->sync_thread)
3600 md_wakeup_thread(mddev->sync_thread);
3602 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3603 if (!mddev->suspended && mddev->thread)
3604 md_wakeup_thread(mddev->thread);
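/*
 * Usage sketch (illustrative): the keywords handled above arrive via the
 * device-mapper message interface, e.g.
 *
 *	dmsetup message <mapped_device> 0 check
 *	dmsetup message <mapped_device> 0 idle
 */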
3609 static int raid_iterate_devices(struct dm_target *ti,
3610 iterate_devices_callout_fn fn, void *data)
3612 struct raid_set *rs = ti->private;
3616 for (i = 0; !r && i < rs->raid_disks; i++) {
3617 if (rs->dev[i].data_dev) {
3618 r = fn(ti, rs->dev[i].data_dev,
3619 0, /* No offset on data devs */
3620 rs->md.dev_sectors, data);
3627 static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
3629 struct raid_set *rs = ti->private;
3630 unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);
3632 blk_limits_io_min(limits, chunk_size);
3633 blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
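/*
 * Worked example (hypothetical geometry): a raid5 set with a 64 KiB chunk
 * and four data stripes advertises io_min = 64 KiB and io_opt = 256 KiB,
 * nudging callers toward full-stripe writes.
 */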
3636 static void raid_presuspend(struct dm_target *ti)
3638 struct raid_set *rs = ti->private;
3640 md_stop_writes(&rs->md);
3643 static void raid_postsuspend(struct dm_target *ti)
3645 struct raid_set *rs = ti->private;
3647 if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
3648 mddev_lock_nointr(&rs->md);
3649 mddev_suspend(&rs->md);
3650 mddev_unlock(&rs->md);
3656 static void attempt_restore_of_faulty_devices(struct raid_set *rs)
3659 uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
3660 unsigned long flags;
3661 bool cleared = false;
3662 struct dm_raid_superblock *sb;
3663 struct mddev *mddev = &rs->md;
3666 /* RAID personalities have to provide hot add/remove methods or we need to bail out. */
3667 if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
3670 memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
3672 for (i = 0; i < mddev->raid_disks; i++) {
3673 r = &rs->dev[i].rdev;
3674 /* HM FIXME: enhance journal device recovery processing */
3675 if (test_bit(Journal, &r->flags))
3678 if (test_bit(Faulty, &r->flags) &&
3679 r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
3680 DMINFO("Faulty %s device #%d has readable super block."
3681 " Attempting to revive it.",
3682 rs->raid_type->name, i);
3685 * Faulty bit may be set, but sometimes the array can
3686 * be suspended before the personalities can respond
3687 * by removing the device from the array (i.e. calling
3688 * 'hot_remove_disk'). If they haven't yet removed
3689 * the failed device, its 'raid_disk' number will be
3690 * '>= 0' - meaning we must call this function ourselves.
3694 clear_bit(In_sync, &r->flags); /* Mandatory for hot remove. */
3695 if (r->raid_disk >= 0) {
3696 if (mddev->pers->hot_remove_disk(mddev, r)) {
3697 /* Failed to revive this device, try next */
3702 r->raid_disk = r->saved_raid_disk = i;
3704 clear_bit(Faulty, &r->flags);
3705 clear_bit(WriteErrorSeen, &r->flags);
3707 if (mddev->pers->hot_add_disk(mddev, r)) {
3708 /* Failed to revive this device, try next */
3709 r->raid_disk = r->saved_raid_disk = -1;
3712 clear_bit(In_sync, &r->flags);
3713 r->recovery_offset = 0;
3714 set_bit(i, (void *) cleared_failed_devices);
3720 /* If any failed devices could be cleared, update all sbs failed_devices bits */
3722 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
3724 rdev_for_each(r, &rs->md) {
3725 if (test_bit(Journal, &r->flags))
3728 sb = page_address(r->sb_page);
3729 sb_retrieve_failed_devices(sb, failed_devices);
3731 for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
3732 failed_devices[i] &= ~cleared_failed_devices[i];
3734 sb_update_failed_devices(sb, failed_devices);
3739 static int __load_dirty_region_bitmap(struct raid_set *rs)
3743 /* Try loading the bitmap unless "raid0", which does not have one */
3744 if (!rs_is_raid0(rs) &&
3745 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
3746 r = bitmap_load(&rs->md);
3748 DMERR("Failed to load bitmap");
3754 /* Enforce updating all superblocks */
3755 static void rs_update_sbs(struct raid_set *rs)
3757 struct mddev *mddev = &rs->md;
3760 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3762 md_update_sb(mddev, 1);
3767 * Reshape changes raid algorithm of @rs to new one within personality
3768 * (e.g. raid6_zr -> raid6_nc), changes stripe size, adds/removes
3769 * disks from a raid set (thus growing/shrinking it), or resizes the set.
3771 * Call mddev_lock_nointr() before!
3773 static int rs_start_reshape(struct raid_set *rs)
3776 struct mddev *mddev = &rs->md;
3777 struct md_personality *pers = mddev->pers;
3779 r = rs_setup_reshape(rs);
3783 /* Need to be resumed to be able to start the reshape; recovery stays frozen until raid_resume(), though */
3784 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3785 mddev_resume(mddev);
3788 * Check any reshape constraints enforced by the personality.
3790 * May as well already kick the reshape off so that
3791 * pers->start_reshape() becomes optional.
3792 r = pers->check_reshape(mddev);
3794 rs->ti->error = "pers->check_reshape() failed";
3799 * Personality may not provide start reshape method in which
3800 * case check_reshape above has already covered everything
3802 if (pers->start_reshape) {
3803 r = pers->start_reshape(mddev);
3805 rs->ti->error = "pers->start_reshape() failed";
3810 /* Suspend because a resume will happen in raid_resume() */
3811 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3812 mddev_suspend(mddev);
3815 * Now that the reshape is set up, update the superblocks to
3816 * reflect that fact so that a table reload will
3817 * access proper superblock content in the ctr.
3824 static int raid_preresume(struct dm_target *ti)
3827 struct raid_set *rs = ti->private;
3828 struct mddev *mddev = &rs->md;
3830 /* This is a resume after a suspend of the set -> it's already started */
3831 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
3835 * The superblocks need to be updated on disk if the
3836 * array is new or new devices got added (thus zeroed
3837 * out by userspace) or __load_dirty_region_bitmap
3838 * will overwrite them in core with old data or fail.
3840 if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
3843 /* Load the bitmap from disk unless raid0 */
3844 r = __load_dirty_region_bitmap(rs);
3848 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
3849 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
3850 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
3851 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
3852 to_bytes(rs->requested_bitmap_chunk_sectors), 0);
3854 DMERR("Failed to resize bitmap");
3857 /* Check for any resize/reshape on @rs and adjust/initiate */
3858 /* Be prepared for mddev_resume() in raid_resume() */
3859 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3860 if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
3861 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3862 mddev->resync_min = mddev->recovery_cp;
3865 /* Check for any reshape request unless new raid set */
3866 if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
3867 /* Initiate a reshape. */
3868 rs_set_rdev_sectors(rs);
3869 mddev_lock_nointr(mddev);
3870 r = rs_start_reshape(rs);
3871 mddev_unlock(mddev);
3873 DMWARN("Failed to check/start reshape, continuing without change");
3880 static void raid_resume(struct dm_target *ti)
3882 struct raid_set *rs = ti->private;
3883 struct mddev *mddev = &rs->md;
3885 if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
3887 * A secondary resume while the device is active.
3888 * Take this opportunity to check whether any failed
3889 * devices are reachable again.
3891 attempt_restore_of_faulty_devices(rs);
3897 /* Only reduce raid set size before running a disk removing reshape. */
3898 if (mddev->delta_disks < 0)
3899 rs_set_capacity(rs);
3902 * Keep the RAID set frozen if reshape/rebuild flags are set.
3903 * The RAID set is unfrozen once the next table load/resume,
3904 * which clears the reshape/rebuild flags, occurs.
3905 * This ensures that the constructor for the inactive table
3906 * retrieves an up-to-date reshape_position.
3908 if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
3909 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3911 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
3912 mddev_lock_nointr(mddev);
3913 mddev_resume(mddev);
3914 mddev_unlock(mddev);
3918 static struct target_type raid_target = {
3920 .version = {1, 13, 0},
3921 .module = THIS_MODULE,
3925 .status = raid_status,
3926 .message = raid_message,
3927 .iterate_devices = raid_iterate_devices,
3928 .io_hints = raid_io_hints,
3929 .presuspend = raid_presuspend,
3930 .postsuspend = raid_postsuspend,
3931 .preresume = raid_preresume,
3932 .resume = raid_resume,
3935 static int __init dm_raid_init(void)
3937 DMINFO("Loading target version %u.%u.%u",
3938 raid_target.version[0],
3939 raid_target.version[1],
3940 raid_target.version[2]);
3941 return dm_register_target(&raid_target);
3944 static void __exit dm_raid_exit(void)
3946 dm_unregister_target(&raid_target);
3949 module_init(dm_raid_init);
3950 module_exit(dm_raid_exit);
3952 module_param(devices_handle_discard_safely, bool, 0644);
3953 MODULE_PARM_DESC(devices_handle_discard_safely,
3954 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
3956 MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
3957 MODULE_ALIAS("dm-raid0");
3958 MODULE_ALIAS("dm-raid1");
3959 MODULE_ALIAS("dm-raid10");
3960 MODULE_ALIAS("dm-raid4");
3961 MODULE_ALIAS("dm-raid5");
3962 MODULE_ALIAS("dm-raid6");
3963 MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
3964 MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
3965 MODULE_LICENSE("GPL");