/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#endif

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

/*
 * Each block ramdisk device has a radix_tree brd_pages that stores the pages
 * containing the block device's contents. A brd page's ->index is its offset
 * in PAGE_SIZE units. This is similar to, but in no way connected with, the
 * kernel's pagecache or buffer cache (which sit above our block device).
 */
struct brd_device {
	int		brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

static DEFINE_MUTEX(brd_mutex);

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

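/*
 * Remove the page backing the given sector from the radix tree and free it,
 * if one was ever allocated.
 */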
static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

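/*
 * Zero the contents of the page backing the given sector, if one has been
 * allocated; sectors with no backing page already read back as zeroes.
 */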
static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

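/*
 * Discard a sector range by zeroing the whole backing pages it covers; the
 * pages themselves are not freed -- see the comment in the loop below.
 */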
static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (is_write) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (!is_write) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

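/*
 * The make_request function: handle a bio by copying each segment to or
 * from the backing pages, without going through an I/O scheduler.
 */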
static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto io_error;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
		    bio->bi_iter.bi_size & ~PAGE_MASK)
			goto io_error;
		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
		goto out;
	}

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
					op_is_write(bio_op(bio)), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

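/*
 * ->rw_page: synchronously read or write a single page and signal
 * completion via page_endio().
 */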
static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
	page_endio(page, is_write, err);
	return err;
}

#ifdef CONFIG_BLK_DEV_RAM_DAX
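/*
 * ->direct_access for DAX: return the kernel virtual address and pfn of the
 * page backing this sector, allocating the page if necessary.
 */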
static long brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, pfn_t *pfn, long size)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOSPC;
	*kaddr = page_address(page);
	*pfn = page_to_pfn_t(page);

	return PAGE_SIZE;
}
#else
#define brd_direct_access NULL
#endif

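/* Only BLKFLSBUF is supported; every other command is rejected with -ENOTTY. */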
static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics: we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&brd_mutex);
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Kill the cache first, so it isn't written back to the
		 * device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		kill_bdev(bdev);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&brd_mutex);

	return error;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
	.ioctl =		brd_ioctl,
	.direct_access =	brd_direct_access,
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in synch where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

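/*
 * Allocate and initialise one brd device, its request queue and its gendisk.
 * The disk is not added here; callers do that once the device is on the
 * brd_devices list.
 */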
static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	/* This is so fdisk will align partitions on 4k, because of
	 * direct_access API needing 4k alignment, returning a PFN
	 * (This is only a problem on very small devices <= 4M,
	 *  otherwise fdisk will align on 1M. Regardless this call
	 *  is harmless)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
#ifdef CONFIG_BLK_DEV_RAM_DAX
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
#endif
	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

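/* Undo brd_alloc() and release all backing pages. */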
static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

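/*
 * Find the brd device with the given number, creating and registering it on
 * demand. Called with brd_devices_mutex held.
 */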
static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

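/* Unlink a brd device from the list, unregister its disk and free it. */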
static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

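/*
 * blk_register_region() probe callback: instantiate the backing device for a
 * "ramX" node on first access and return its gendisk kobject.
 */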
static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * The brd module now has a feature to instantiate underlying device
	 * structures on-demand, provided that there is an access dev node.
	 *
	 * (1) if rd_nr is specified, create that many upfront. else
	 *     it defaults to CONFIG_BLK_DEV_RAM_COUNT
	 * (2) User can further extend brd devices by creating dev nodes
	 *     themselves and have the kernel automatically instantiate the
	 *     actual device on-demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created it will be created
	 *	dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	if (unlikely(!max_part))
		max_part = 1;

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);