/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

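/*
 * A quick sanity check of the arithmetic above (PAGE_SHIFT is
 * architecture-dependent): with the common 4 KiB pages, PAGE_SHIFT == 12,
 * so PAGE_SECTORS_SHIFT == 3 and each page backs PAGE_SECTORS == 8 of the
 * 512-byte sectors implied by SECTOR_SHIFT.
 */
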
/*
 * Each block ramdisk device has a radix_tree, brd_pages, that stores the
 * pages containing the block device's contents. A brd page's ->index is its
 * offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int			brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

static DEFINE_MUTEX(brd_mutex);

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
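	/*
	 * If the insert fails, a concurrent inserter presumably beat us to
	 * this index (-EEXIST); drop our page and reuse the one that won.
	 */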
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
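/*
 * Worked example (illustrative numbers): with 4 KiB pages, a 1024-byte
 * write whose offset within the page is PAGE_SIZE - 512 has copy == 512 < n,
 * so it straddles a page boundary and both backing pages are instantiated
 * here before the non-sleeping copy_to_brd() runs.
 */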
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
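		/*
		 * Zeroing in place (rather than freeing) also keeps the
		 * behaviour consistent with the discard_zeroes_data = 1
		 * capability advertised in brd_alloc() below.
		 */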
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

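	/*
	 * flush_dcache_page() placement follows Documentation/cachetlb.txt:
	 * after filling the page on a read, and before reading user-visible
	 * data out of it on a write.
	 */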
	mem = kmap_atomic(page);
	if (rw == READ) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static void brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	int rw;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto io_error;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
		goto out;
	}

	rw = bio_rw(bio);
	if (rw == READA)
		rw = READ;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len,
					bvec.bv_offset, rw, sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio);
	return;
io_error:
	bio_io_error(bio);
}

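/*
 * ->rw_page lets the MM synchronously read or write a single page without
 * constructing a bio. At this point in history PAGE_CACHE_SIZE is simply an
 * alias for PAGE_SIZE.
 */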
static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
	page_endio(page, rw & WRITE, err);
	return err;
}

#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, unsigned long *pfn, long size)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOSPC;
	*kaddr = page_address(page);
	*pfn = page_to_pfn(page);

	/*
	 * TODO: If size > PAGE_SIZE, we could look to see if the next page in
	 * the file happens to be mapped to the next page of physical RAM.
	 */
	return PAGE_SIZE;
}
#else
#define brd_direct_access NULL
#endif
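
/*
 * With CONFIG_BLK_DEV_RAM_DAX, a filesystem mounted with "-o dax" on brd can
 * access pages directly: ->direct_access reports how many bytes are
 * addressable at *kaddr, which is always one PAGE_SIZE here (see the TODO
 * above).
 */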

static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics, we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&brd_mutex);
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Kill the cache first, so it isn't written back to the
		 * device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		kill_bdev(bdev);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&brd_mutex);

	return error;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
	.ioctl =		brd_ioctl,
	.direct_access =	brd_direct_access,
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");
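
/*
 * Example invocation (values are illustrative):
 *	modprobe brd rd_nr=4 rd_size=16384 max_part=4
 * creates ram0..ram3 up front, 16 MiB each (rd_size is in KiB), reserving
 * four minors per disk for partitions.
 */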

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif
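
/*
 * e.g. booting with "ramdisk_size=65536" on the kernel command line yields
 * 64 MiB ramdisks; like rd_size, the value is in KiB.
 */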

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	/*
	 * Make fdisk align partitions on 4k, because the direct_access
	 * API returns a PFN and therefore needs 4k alignment. (This is
	 * only a problem on very small devices <= 4M; otherwise fdisk
	 * will align on 1M. Regardless, the call is harmless.)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
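	/* rd_size is in KiB; set_capacity() takes 512-byte sectors, hence * 2. */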
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * The brd module can instantiate the underlying device structure
	 * on demand, provided a device node for it has been accessed.
	 *
	 * (1) If rd_nr is specified, create that many devices upfront;
	 *     otherwise it defaults to CONFIG_BLK_DEV_RAM_COUNT.
	 * (2) Users can further extend the set of brd devices by creating
	 *     device nodes themselves and having the kernel instantiate
	 *     the actual device on demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If device (X / max_part) was not already created, it will
	 *	be created dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	if (unlikely(!max_part))
		max_part = 1;

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);