/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int			brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};
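
/*
 * Worked example of the mapping above (editor's illustration, not part of
 * the original driver): with 4 KiB pages and 512-byte sectors,
 * PAGE_SECTORS_SHIFT = 12 - 9 = 3 and PAGE_SECTORS = 8, so a request at
 * sector 19 touches the brd page with ->index = 19 >> 3 = 2, at byte
 * offset (19 & 7) << 9 = 1536 within that page.
 */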

static DEFINE_MUTEX(brd_mutex);

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, insert that, and return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		/* Lost a race with another inserter: reuse the winner's page. */
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and the radix tree. This must only be called
 * when there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to change.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}
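
/*
 * Illustration (editor's addition, not in the original source): a buffer can
 * straddle a page boundary, which is why copy_to_brd_setup() may have to
 * instantiate two pages. For a 1024-byte write at sector 7 on a 4 KiB-page
 * system: offset = (7 & 7) << 9 = 3584, so copy = min(1024, 4096 - 3584) =
 * 512 bytes land in the first page, and the remaining 512 bytes go to the
 * page holding sector 8.
 */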

static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (rw == READ) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	int rw;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto io_error;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
		    bio->bi_iter.bi_size & ~PAGE_MASK)
			goto io_error;
		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
		goto out;
	}

	rw = bio_rw(bio);
	if (rw == READA)
		rw = READ;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len,
					bvec.bv_offset, rw, sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);

	page_endio(page, rw & WRITE, err);
	return err;
}

#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
			void __pmem **kaddr, unsigned long *pfn)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOSPC;
	*kaddr = (void __pmem *)page_address(page);
	*pfn = page_to_pfn(page);

	return PAGE_SIZE;
}
#else
#define brd_direct_access NULL
#endif
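
/*
 * Usage note (editor's addition): with CONFIG_BLK_DEV_RAM_DAX enabled, a
 * DAX-capable filesystem can map brd pages directly and bypass the page
 * cache, e.g. (assuming ext4 built with CONFIG_FS_DAX):
 *
 *	mkfs.ext4 /dev/ram0
 *	mount -o dax /dev/ram0 /mnt
 *
 * I/O on such a mount reaches the ramdisk through ->direct_access rather
 * than through brd_make_request.
 */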

static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics, we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&brd_mutex);
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Kill the cache first, so it isn't written back to the
		 * device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		kill_bdev(bdev);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&brd_mutex);

	return error;
}
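
/*
 * Usage note (editor's addition): BLKFLSBUF can be issued from userspace
 * with util-linux; on brd it releases the backing pages rather than merely
 * flushing buffers:
 *
 *	blockdev --flushbufs /dev/ram0
 */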

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
	.ioctl =		brd_ioctl,
	.direct_access =	brd_direct_access,
};

/*
 * And now the module's code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
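
/*
 * Example invocation (editor's addition): the following creates two RAM
 * disks of 1 GiB each and reserves four minors per device so each disk can
 * carry partitions:
 *
 *	modprobe brd rd_nr=2 rd_size=1048576 max_part=4
 */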

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif
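
/*
 * Example (editor's addition): when brd is built in, the legacy parameter
 * above sizes the RAM disks from the kernel command line, e.g.
 * "ramdisk_size=65536" for 64 MiB devices.
 */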

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	/* This is so fdisk will align partitions on 4k, because of
	 * direct_access API needing 4k alignment, returning a PFN.
	 * (This is only a problem on very small devices <= 4M,
	 *  otherwise fdisk will align on 1M. Regardless, this call
	 *  is harmless.)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
	set_capacity(disk, rd_size * 2);	/* rd_size is in KiB, capacity in sectors */

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}
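
/*
 * Capacity math (editor's illustration): set_capacity() counts 512-byte
 * sectors while rd_size is in KiB, hence the factor of two above. For
 * example, rd_size = 4096 yields 8192 sectors = 4 MiB.
 */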

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static inline void brd_check_and_reset_par(void)
{
	if (unlikely(!max_part))
		max_part = 1;

	/*
	 * Make sure 'max_part' divides (1U << MINORBITS) exactly; otherwise
	 * it is possible to get the same dev_t twice when adding partitions.
	 */
	if ((1U << MINORBITS) % max_part != 0)
		max_part = 1UL << fls(max_part);

	if (max_part > DISK_MAX_PARTS) {
		pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
			DISK_MAX_PARTS, DISK_MAX_PARTS);
		max_part = DISK_MAX_PARTS;
	}
}
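
/*
 * Worked example (editor's addition): max_part = 3 does not divide
 * 1 << MINORBITS evenly, so it is rounded up to 1 << fls(3) = 1 << 2 = 4,
 * keeping partition dev_t values unique across devices.
 */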

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * The brd module can instantiate the underlying device structure
	 * on demand, provided that the corresponding device node is accessed.
	 *
	 * (1) If rd_nr is specified, create that many devices upfront;
	 *     otherwise it defaults to CONFIG_BLK_DEV_RAM_COUNT.
	 * (2) Users can further extend the set of brd devices by creating
	 *     device nodes themselves and having the kernel instantiate the
	 *     actual device on demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If device (X / max_part) was not already created, it will be
	 *	created dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	brd_check_and_reset_par();

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);