/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/dax.h>
#include <linux/badblocks.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

void __vfs_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sVFS (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					    "for block device %s (err=%d).\n",
					    bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and page cache, dirty or not. */
void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(kill_bdev);

/* Invalidate clean unused buffers and page cache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current size */
	if (bdev->bd_block_size != size) {
		sync_blockdev(bdev);
		bdev->bd_block_size = size;
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

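/*
 * Usage sketch (illustrative only, not part of this file): a filesystem's
 * fill_super implementation typically negotiates its block size against
 * the underlying device with sb_min_blocksize() before reading its
 * on-disk superblock.  myfs_fill_super and the 1024-byte minimum below
 * are stand-ins, not real kernel names:
 *
 *	static int myfs_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		int blocksize = sb_min_blocksize(sb, 1024);
 *
 *		if (!blocksize)
 *			return -EINVAL;
 *		... read the superblock with sb_bread(sb, ...) ...
 *	}
 */
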
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = bdev_file_inode(file);

	return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter,
				    blkdev_get_block, NULL, NULL,
				    DIO_SKIP_DIO_COUNT);
}

int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  This includes filesystem data as well as data on the
 * underlying block device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when
 * multiple freeze requests arrive simultaneously.  It counts up in
 * freeze_bdev() and counts down in thaw_bdev().  When it becomes 0,
 * thaw_bdev() actually unfreezes the filesystem.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1) {
		/*
		 * We don't even need to grab a reference - the first call
		 * to freeze_bdev grabs an active reference and only the last
		 * thaw_bdev drops it.
		 */
		sb = get_super(bdev);
		if (sb)
			drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}

	sb = get_active_super(bdev);
	if (!sb)
		goto out;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	if (error) {
		deactivate_super(sb);
		bdev->bd_fsfreeze_count--;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return ERR_PTR(error);
	}
	deactivate_super(sb);
 out:
	sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return sb;	/* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);

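/*
 * Usage sketch (illustrative only): freeze_bdev() and thaw_bdev() are
 * intended to be used as a pair around an operation that needs the
 * filesystem quiesced, such as taking a snapshot; the snapshot step
 * below is a placeholder:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	... take the snapshot ...
 *	thaw_bdev(bdev, sb);
 *
 * freeze_bdev() returns NULL when no filesystem is mounted on the
 * device, and thaw_bdev() accepts a NULL @sb, so the pairing remains
 * valid in that case.
 */
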
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_readpages(struct file *file, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = bdev_file_inode(filp);
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = filemap_write_and_wait_range(filp->f_mapping, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page from (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", e.g. out of memory,
 * or queue full; callers should try a different route to read this page
 * rather than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_queue, false);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
	blk_queue_exit(bdev->bd_queue);
	return result;
}
EXPORT_SYMBOL_GPL(bdev_read_page);

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (e.g. the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", e.g. out of memory,
 * or queue full; callers should try a different route to write this page
 * rather than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_queue, false);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
	if (result) {
		end_page_writeback(page);
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev->bd_queue);
	return result;
}
EXPORT_SYMBOL_GPL(bdev_write_page);

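/*
 * Usage sketch (illustrative only): the swap I/O path in mm/page_io.c,
 * for example, tries the rw_page fast path first and falls back to a
 * regular bio-based read when it fails:
 *
 *	if (!bdev_read_page(bdev, sector, page))
 *		return 0;
 *	... otherwise build and submit a read bio for the page ...
 *
 * The "soft" error convention documented above is what makes this
 * unconditional fallback safe.
 */
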
/**
 * bdev_direct_access() - Get the address for directly-accessible memory
 * @bdev: The device containing the memory
 * @dax: control and output parameters for ->direct_access
 *
 * If a block device is made up of directly addressable memory, this function
 * will tell the caller the PFN and the address of the memory.  The address
 * may be directly dereferenced within the kernel without the need to call
 * ioremap(), kmap() or similar.  The PFN is suitable for inserting into
 * page tables.
 *
 * Return: negative errno if an error occurs, otherwise the number of bytes
 * accessible at this address.
 */
long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	sector_t sector = dax->sector;
	long avail, size = dax->size;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	/*
	 * The device driver is allowed to sleep, in order to make the
	 * memory directly accessible.
	 */
	might_sleep();

	if (size < 0)
		return size;
	if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
		return -EOPNOTSUPP;
	if ((sector + DIV_ROUND_UP(size, 512)) >
					part_nr_sects_read(bdev->bd_part))
		return -ERANGE;
	sector += get_start_sect(bdev);
	if (sector % (PAGE_SIZE / 512))
		return -EINVAL;
	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
	if (!avail)
		return -ERANGE;
	if (avail > 0 && avail & ~PAGE_MASK)
		return -ENXIO;
	return min(avail, size);
}
EXPORT_SYMBOL_GPL(bdev_direct_access);

/**
 * bdev_dax_supported() - Check if the device supports dax for filesystem
 * @sb: The superblock of the device
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with the dax option.
 *
 * Return: negative errno if unsupported, 0 if supported.
 */
int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	struct blk_dax_ctl dax = {
		.sector = 0,
		.size = PAGE_SIZE,
	};
	int err;

	if (blocksize != PAGE_SIZE) {
		vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
		return -EINVAL;
	}

	err = bdev_direct_access(sb->s_bdev, &dax);
	if (err < 0) {
		switch (err) {
		case -EOPNOTSUPP:
			vfs_msg(sb, KERN_ERR,
				"error: device does not support dax");
			break;
		case -EINVAL:
			vfs_msg(sb, KERN_ERR,
				"error: unaligned partition for dax");
			break;
		default:
			vfs_msg(sb, KERN_ERR,
				"error: dax access failed (%d)", err);
		}
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(bdev_dax_supported);

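/*
 * Usage sketch (illustrative only): a filesystem that honours a "dax"
 * mount option typically validates it at mount time roughly like this;
 * the test_opt() usage mirrors ext4, but the exact flag name is an
 * assumption:
 *
 *	if (test_opt(sb, DAX)) {
 *		err = bdev_dax_supported(sb, sb->s_blocksize);
 *		if (err)
 *			return err;
 *	}
 */
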
/**
 * bdev_dax_capable() - Return whether the raw device is capable of dax
 * @bdev: The device for raw block device access
 */
bool bdev_dax_capable(struct block_device *bdev)
{
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
	};

	if (!IS_ENABLED(CONFIG_FS_DAX))
		return false;

	dax.sector = 0;
	if (bdev_direct_access(bdev, &dax) < 0)
		return false;

	dax.sector = bdev->bd_part->nr_sects - (PAGE_SIZE / 512);
	if (bdev_direct_access(bdev, &dax) < 0)
		return false;

	return true;
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct bdev_inode *bdi = BDEV_I(inode);

	kmem_cache_free(bdev_cachep, bdi);
}

static void bdev_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, bdev_i_callback);
}

static void init_once(void *foo)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	memset(bdev, 0, sizeof(*bdev));
	mutex_init(&bdev->bd_mutex);
	INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	inode_init_once(&ei->vfs_inode);
	/* Initialize mutex for freeze. */
	mutex_init(&bdev->bd_fsfreeze_mutex);
}

static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	spin_lock(&bdev_lock);
	list_del_init(&bdev->bd_list);
	spin_unlock(&bdev_lock);
	if (bdev->bd_bdi != &noop_backing_dev_info)
		bdi_put(bdev->bd_bdi);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static struct dentry *bd_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	struct dentry *dent;
	dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
	if (!IS_ERR(dent))
		dent->d_sb->s_iflags |= SB_I_CGROUPWB;
	return dent;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.mount		= bd_mount,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static LIST_HEAD(all_bdevs);

/*
 * If there is a bdev inode for this device, unhash it so that it gets evicted
 * as soon as the last inode reference is dropped.
 */
void bdev_unhash_inode(dev_t dev)
{
	struct inode *inode;

	inode = ilookup5(blockdev_superblock, hash(dev), bdev_test, &dev);
	if (inode) {
		remove_inode_hash(inode);
		iput(inode);
	}
}

struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(blockdev_superblock, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_super = NULL;
		bdev->bd_inode = inode;
		bdev->bd_bdi = &noop_backing_dev_info;
		bdev->bd_block_size = i_blocksize(inode);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

EXPORT_SYMBOL(bdget);

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	ihold(bdev->bd_inode);
	return bdev;
}
EXPORT_SYMBOL(bdgrab);

long nr_blockdev_pages(void)
{
	struct block_device *bdev;
	long ret = 0;
	spin_lock(&bdev_lock);
	list_for_each_entry(bdev, &all_bdevs, bd_list) {
		ret += bdev->bd_inode->i_mapping->nrpages;
	}
	spin_unlock(&bdev_lock);
	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

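/*
 * Lifetime sketch (illustrative only): a struct block_device pins its
 * bdev inode, so the usual pattern is a bdget()/bdput() pair, with
 * bdgrab() taking an extra reference on a bdev that is already known
 * to be referenced:
 *
 *	struct block_device *bdev = bdget(dev);
 *
 *	if (!bdev)
 *		return -ENOMEM;
 *	... use bdev ...
 *	bdput(bdev);
 */
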
static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev) {
		bdgrab(bdev);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional reference to bd_inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			bdgrab(bdev);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

/* Call when you free an inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (!sb_is_blkdev_sb(inode->i_sb))
		bdev = inode->i_bdev;
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
	spin_unlock(&bdev_lock);

	if (bdev)
		bdput(bdev);
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - prepare to claim a block device
 * @bdev: block device of interest
 * @whole: the whole device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Prepare to claim @bdev.  This function fails if @bdev is already
 * claimed by another holder and waits if another claiming is in
 * progress.  This function doesn't actually claim.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).  Might release bdev_lock, sleep and regrab
 * it multiple times.
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
static int bd_prepare_to_claim(struct block_device *bdev,
			       struct block_device *whole, void *holder)
{
retry:
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder))
		return -EBUSY;

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		spin_lock(&bdev_lock);
		goto retry;
	}

	/* yay, all mine */
	return 0;
}

/**
 * bd_start_claiming - start claiming a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * @bdev is about to be opened exclusively.  Check @bdev can be opened
 * exclusively and mark that an exclusive open is in progress.  Each
 * successful call to this function must be matched with a call to
 * either bd_finish_claiming() or bd_abort_claiming() (which do not
 * fail).
 *
 * This function is used to gain exclusive access to the block device
 * without actually causing other exclusive open attempts to fail.  It
 * should be used when the open sequence itself requires exclusive
 * access but may subsequently fail.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to the block device containing @bdev on success, ERR_PTR()
 * value on failure.
 */
static struct block_device *bd_start_claiming(struct block_device *bdev,
					      void *holder)
{
	struct gendisk *disk;
	struct block_device *whole;
	int partno, err;

	might_sleep();

	/*
	 * @bdev might not have been initialized properly yet, look up
	 * and grab the outer block device the hard way.
	 */
	disk = get_gendisk(bdev->bd_dev, &partno);
	if (!disk)
		return ERR_PTR(-ENXIO);

	/*
	 * Normally, @bdev should equal what's returned from bdget_disk()
	 * if partno is 0; however, some drivers (floppy) use multiple
	 * bdev's for the same physical device and @bdev may be one of the
	 * aliases.  Keep @bdev if partno is 0.  This means claimer
	 * tracking is broken for those devices but it has always been that
	 * way.
	 */
	if (partno)
		whole = bdget_disk(disk, 0);
	else
		whole = bdgrab(bdev);

	module_put(disk->fops->owner);
	put_disk(disk);
	if (!whole)
		return ERR_PTR(-ENOMEM);

	/* prepare to claim, if successful, mark claiming in progress */
	spin_lock(&bdev_lock);

	err = bd_prepare_to_claim(bdev, whole, holder);
	if (err == 0) {
		whole->bd_claiming = holder;
		spin_unlock(&bdev_lock);
		return whole;
	} else {
		spin_unlock(&bdev_lock);
		bdput(whole);
		return ERR_PTR(err);
	}
}

#ifdef CONFIG_SYSFS
struct bd_holder_disk {
	struct list_head	list;
	struct gendisk		*disk;
	int			refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
						  struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
		if (holder->disk == disk)
			return holder;
	return NULL;
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	sysfs_remove_link(from, kobject_name(to));
}

/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;
	int ret = 0;

	mutex_lock(&bdev->bd_mutex);

	WARN_ON_ONCE(!bdev->bd_holder);

	/* FIXME: remove the following once add_disk() handles errors */
	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
		goto out_unlock;

	holder = bd_find_holder_disk(bdev, disk);
	if (holder) {
		holder->refcnt++;
		goto out_unlock;
	}

	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;

	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
	if (ret)
		goto out_free;

	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
	if (ret)
		goto out_del;
	/*
	 * bdev could be deleted beneath us which would implicitly destroy
	 * the holder directory.  Hold on to it.
	 */
	kobject_get(bdev->bd_part->holder_dir);

	list_add(&holder->list, &bdev->bd_holder_disks);
	goto out_unlock;

out_del:
	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
out_free:
	kfree(holder);
out_unlock:
	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);

/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	mutex_lock(&bdev->bd_mutex);

	holder = bd_find_holder_disk(bdev, disk);

	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
		del_symlink(bdev->bd_part->holder_dir,
			    &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_part->holder_dir);
		list_del_init(&holder->list);
		kfree(holder);
	}

	mutex_unlock(&bdev->bd_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif

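/*
 * Usage sketch (illustrative only): stacking drivers such as dm pair
 * these calls around the lifetime of a claimed member device, roughly:
 *
 *	bdev = blkdev_get_by_dev(dev, mode, holder);
 *	bd_link_disk_holder(bdev, stacked_disk);
 *	...
 *	bd_unlink_disk_holder(bdev, stacked_disk);
 *	blkdev_put(bdev, mode);
 *
 * blkdev_get_by_dev() and blkdev_put() are defined later in this file;
 * stacked_disk stands for the holder's gendisk.
 */
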
/**
 * flush_disk - invalidates all buffer-cache entries on a disk
 *
 * @bdev: struct block device to be flushed
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Invalidates all buffer-cache entries on a disk. It should be called
 * when a disk has been changed -- either by a media change or online
 * resize.
 */
static void flush_disk(struct block_device *bdev, bool kill_dirty)
{
	if (__invalidate_device(bdev, kill_dirty)) {
		printk(KERN_WARNING "VFS: busy inodes on changed media or "
		       "resized disk %s\n",
		       bdev->bd_disk ? bdev->bd_disk->disk_name : "");
	}

	if (!bdev->bd_disk)
		return;
	if (disk_part_scan_enabled(bdev->bd_disk))
		bdev->bd_invalidated = 1;
}

/**
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @bdev: struct bdev to adjust.
 *
 * This routine checks to see if the bdev size does not match the disk size
 * and adjusts it if it differs.
 */
void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
{
	loff_t disk_size, bdev_size;

	disk_size = (loff_t)get_capacity(disk) << 9;
	bdev_size = i_size_read(bdev->bd_inode);
	if (disk_size != bdev_size) {
		printk(KERN_INFO
		       "%s: detected capacity change from %lld to %lld\n",
		       disk->disk_name, bdev_size, disk_size);
		i_size_write(bdev->bd_inode, disk_size);
		flush_disk(bdev, false);
	}
}
EXPORT_SYMBOL(check_disk_size_change);

/**
 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
 * @disk: struct gendisk to be revalidated
 *
 * This routine is a wrapper for lower-level driver's revalidate_disk
 * call-backs.  It is used to do common pre and post operations needed
 * for all revalidate_disk operations.
 */
int revalidate_disk(struct gendisk *disk)
{
	struct block_device *bdev;
	int ret = 0;

	if (disk->fops->revalidate_disk)
		ret = disk->fops->revalidate_disk(disk);
	bdev = bdget_disk(disk, 0);
	if (!bdev)
		return ret;

	mutex_lock(&bdev->bd_mutex);
	check_disk_size_change(disk, bdev);
	bdev->bd_invalidated = 0;
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
EXPORT_SYMBOL(revalidate_disk);

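/*
 * Usage sketch (illustrative only): a driver that learns its backing
 * storage changed size calls this after updating the gendisk capacity:
 *
 *	set_capacity(disk, new_sectors);
 *	revalidate_disk(disk);
 *
 * check_disk_size_change() above then propagates the new capacity to
 * the bdev inode so that already-open descriptors see the new size.
 */
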
/*
 * This routine checks whether removable media has been changed,
 * and invalidates all buffer-cache entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	const struct block_device_operations *bdops = disk->fops;
	unsigned int events;

	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
				   DISK_EVENT_EJECT_REQUEST);
	if (!(events & DISK_EVENT_MEDIA_CHANGE))
		return 0;

	flush_disk(bdev, true);
	if (bdops->revalidate_disk)
		bdops->revalidate_disk(bdev->bd_disk);
	return 1;
}

EXPORT_SYMBOL(check_disk_change);

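/*
 * Usage sketch (illustrative only): removable-media drivers call this
 * from their ->open() method so that stale caches are dropped before a
 * new medium is used; mydrv_open is a stand-in for a driver's
 * block_device_operations open callback:
 *
 *	static int mydrv_open(struct block_device *bdev, fmode_t mode)
 *	{
 *		check_disk_change(bdev);
 *		...
 *	}
 */
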
void bd_set_size(struct block_device *bdev, loff_t size)
{
	unsigned bsize = bdev_logical_block_size(bdev);

	inode_lock(bdev->bd_inode);
	i_size_write(bdev->bd_inode, size);
	inode_unlock(bdev->bd_inode);
	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_block_size = bsize;
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);

Peter Zijlstra6d740cd2007-02-20 13:58:18 -08001247/*
1248 * bd_mutex locking:
1249 *
1250 * mutex_lock(part->bd_mutex)
1251 * mutex_lock_nested(whole->bd_mutex, 1)
1252 */
1253
Al Viro572c4892007-10-08 13:24:05 -04001254static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 struct gendisk *disk;
Tejun Heo523e1d32011-10-19 14:31:07 +02001257 struct module *owner;
Pavel Emelyanov7db9cfd2008-06-05 22:46:27 -07001258 int ret;
Tejun Heocf771cb2008-09-03 09:01:09 +02001259 int partno;
Al Virofe6e9c12008-06-23 08:30:55 -04001260 int perm = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261
Al Viro572c4892007-10-08 13:24:05 -04001262 if (mode & FMODE_READ)
Al Virofe6e9c12008-06-23 08:30:55 -04001263 perm |= MAY_READ;
Al Viro572c4892007-10-08 13:24:05 -04001264 if (mode & FMODE_WRITE)
Al Virofe6e9c12008-06-23 08:30:55 -04001265 perm |= MAY_WRITE;
1266 /*
1267 * hooks: /n/, see "layering violations".
1268 */
Chris Wrightb7300b72010-08-10 18:02:55 -07001269 if (!for_part) {
1270 ret = devcgroup_inode_permission(bdev->bd_inode, perm);
1271 if (ret != 0) {
1272 bdput(bdev);
1273 return ret;
1274 }
Al Viro82666022008-08-01 05:32:04 -04001275 }
Pavel Emelyanov7db9cfd2008-06-05 22:46:27 -07001276
NeilBrownd3374822009-01-09 08:31:10 +11001277 restart:
Tejun Heo0762b8b2008-08-25 19:56:12 +09001278
Tejun Heo89f97492008-11-05 10:21:06 +01001279 ret = -ENXIO;
Tejun Heocf771cb2008-09-03 09:01:09 +02001280 disk = get_gendisk(bdev->bd_dev, &partno);
Tejun Heo0762b8b2008-08-25 19:56:12 +09001281 if (!disk)
Arnd Bergmann6e9624b2010-08-07 18:25:34 +02001282 goto out;
Tejun Heo523e1d32011-10-19 14:31:07 +02001283 owner = disk->fops->owner;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284
Tejun Heo69e02c52011-03-09 19:54:27 +01001285 disk_block_events(disk);
NeilBrown6796bf52006-12-08 02:36:16 -08001286 mutex_lock_nested(&bdev->bd_mutex, for_part);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 if (!bdev->bd_openers) {
1288 bdev->bd_disk = disk;
Andi Kleen87192a22012-01-12 17:20:34 -08001289 bdev->bd_queue = disk->queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 bdev->bd_contains = bdev;
Jan Kara84972a92017-02-02 15:56:52 +01001291 if (bdev->bd_bdi == &noop_backing_dev_info)
1292 bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
Dan Williams03cdadb2016-02-26 15:19:43 -08001293
Tejun Heocf771cb2008-09-03 09:01:09 +02001294 if (!partno) {
Tejun Heo89f97492008-11-05 10:21:06 +01001295 ret = -ENXIO;
1296 bdev->bd_part = disk_get_part(disk, partno);
1297 if (!bdev->bd_part)
1298 goto out_clear;
1299
Tejun Heo1196f8b2011-04-21 20:54:45 +02001300 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 if (disk->fops->open) {
Al Viro572c4892007-10-08 13:24:05 -04001302 ret = disk->fops->open(bdev, mode);
NeilBrownd3374822009-01-09 08:31:10 +11001303 if (ret == -ERESTARTSYS) {
1304 /* Lost a race with 'disk' being
1305 * deleted, try again.
1306 * See md.c
1307 */
1308 disk_put_part(bdev->bd_part);
1309 bdev->bd_part = NULL;
NeilBrownd3374822009-01-09 08:31:10 +11001310 bdev->bd_disk = NULL;
Andi Kleen87192a22012-01-12 17:20:34 -08001311 bdev->bd_queue = NULL;
NeilBrownd3374822009-01-09 08:31:10 +11001312 mutex_unlock(&bdev->bd_mutex);
Tejun Heo69e02c52011-03-09 19:54:27 +01001313 disk_unblock_events(disk);
Tejun Heo69e02c52011-03-09 19:54:27 +01001314 put_disk(disk);
Tejun Heo523e1d32011-10-19 14:31:07 +02001315 module_put(owner);
NeilBrownd3374822009-01-09 08:31:10 +11001316 goto restart;
1317 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 }
Tejun Heo7e697232011-05-23 13:26:07 +02001319
Christoph Hellwig22375702016-09-14 11:56:13 +02001320 if (!ret)
Tejun Heo7e697232011-05-23 13:26:07 +02001321			bd_set_size(bdev, (loff_t)get_capacity(disk) << 9);
Tejun Heo7e697232011-05-23 13:26:07 +02001322
Tejun Heo1196f8b2011-04-21 20:54:45 +02001323 /*
1324			 * If the device is invalidated, rescan the partitions
1325			 * if the open succeeded or failed with -ENOMEDIUM.
1326 * The latter is necessary to prevent ghost
1327 * partitions on a removed medium.
1328 */
Jun'ichi Nomurafe316bf2012-03-02 10:38:33 +01001329 if (bdev->bd_invalidated) {
1330 if (!ret)
1331 rescan_partitions(disk, bdev);
1332 else if (ret == -ENOMEDIUM)
1333 invalidate_partitions(disk, bdev);
1334 }
Dan Williams5a023cd2015-11-30 10:20:29 -08001335
Tejun Heo1196f8b2011-04-21 20:54:45 +02001336 if (ret)
1337 goto out_clear;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 struct block_device *whole;
1340 whole = bdget_disk(disk, 0);
1341 ret = -ENOMEM;
1342 if (!whole)
Tejun Heo0762b8b2008-08-25 19:56:12 +09001343 goto out_clear;
NeilBrown37be4122006-12-08 02:36:16 -08001344 BUG_ON(for_part);
Al Viro572c4892007-10-08 13:24:05 -04001345 ret = __blkdev_get(whole, mode, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 if (ret)
Tejun Heo0762b8b2008-08-25 19:56:12 +09001347 goto out_clear;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 bdev->bd_contains = whole;
Tejun Heo89f97492008-11-05 10:21:06 +01001349 bdev->bd_part = disk_get_part(disk, partno);
Tejun Heoe71bf0d2008-09-03 09:03:02 +02001350 if (!(disk->flags & GENHD_FL_UP) ||
Tejun Heo89f97492008-11-05 10:21:06 +01001351 !bdev->bd_part || !bdev->bd_part->nr_sects) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 ret = -ENXIO;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001353 goto out_clear;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 }
Tejun Heo89f97492008-11-05 10:21:06 +01001355 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 }
1357 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 if (bdev->bd_contains == bdev) {
Tejun Heo1196f8b2011-04-21 20:54:45 +02001359 ret = 0;
1360 if (bdev->bd_disk->fops->open)
Al Viro572c4892007-10-08 13:24:05 -04001361 ret = bdev->bd_disk->fops->open(bdev, mode);
Tejun Heo1196f8b2011-04-21 20:54:45 +02001362			/* same as the first-opener case; see the comment there */
Jun'ichi Nomurafe316bf2012-03-02 10:38:33 +01001363 if (bdev->bd_invalidated) {
1364 if (!ret)
1365 rescan_partitions(bdev->bd_disk, bdev);
1366 else if (ret == -ENOMEDIUM)
1367 invalidate_partitions(bdev->bd_disk, bdev);
1368 }
Tejun Heo1196f8b2011-04-21 20:54:45 +02001369 if (ret)
1370 goto out_unlock_bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 }
Tejun Heo69e02c52011-03-09 19:54:27 +01001372 /* only one opener holds refs to the module and disk */
Tejun Heo69e02c52011-03-09 19:54:27 +01001373 put_disk(disk);
Tejun Heo523e1d32011-10-19 14:31:07 +02001374 module_put(owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 }
1376 bdev->bd_openers++;
NeilBrown37be4122006-12-08 02:36:16 -08001377 if (for_part)
1378 bdev->bd_part_count++;
Arjan van de Venc039e312006-03-23 03:00:28 -08001379 mutex_unlock(&bdev->bd_mutex);
Tejun Heo69e02c52011-03-09 19:54:27 +01001380 disk_unblock_events(disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 return 0;
1382
Tejun Heo0762b8b2008-08-25 19:56:12 +09001383 out_clear:
Tejun Heo89f97492008-11-05 10:21:06 +01001384 disk_put_part(bdev->bd_part);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 bdev->bd_disk = NULL;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001386 bdev->bd_part = NULL;
Andi Kleen87192a22012-01-12 17:20:34 -08001387 bdev->bd_queue = NULL;
Jan Kara84972a92017-02-02 15:56:52 +01001388 bdi_put(bdev->bd_bdi);
1389 bdev->bd_bdi = &noop_backing_dev_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 if (bdev != bdev->bd_contains)
Al Viro572c4892007-10-08 13:24:05 -04001391 __blkdev_put(bdev->bd_contains, mode, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 bdev->bd_contains = NULL;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001393 out_unlock_bdev:
Arjan van de Venc039e312006-03-23 03:00:28 -08001394 mutex_unlock(&bdev->bd_mutex);
Tejun Heo69e02c52011-03-09 19:54:27 +01001395 disk_unblock_events(disk);
Tejun Heo0762b8b2008-08-25 19:56:12 +09001396 put_disk(disk);
Tejun Heo523e1d32011-10-19 14:31:07 +02001397 module_put(owner);
Dan Carpenter4345cab2011-03-19 13:53:31 +01001398 out:
Tejun Heo0762b8b2008-08-25 19:56:12 +09001399 bdput(bdev);
1400
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 return ret;
1402}
1403
Tejun Heod4d77622010-11-13 11:55:18 +01001404/**
1405 * blkdev_get - open a block device
1406 * @bdev: block_device to open
1407 * @mode: FMODE_* mask
1408 * @holder: exclusive holder identifier
1409 *
1410 * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
1411 * opened with exclusive access.  Specifying %FMODE_EXCL with %NULL
1412 * @holder is invalid. Exclusive opens may nest for the same @holder.
1413 *
1414 * On success, the reference count of @bdev is unchanged. On failure,
1415 * @bdev is put.
1416 *
1417 * CONTEXT:
1418 * Might sleep.
1419 *
1420 * RETURNS:
1421 * 0 on success, -errno on failure.
1422 */
Tejun Heoe525fd82010-11-13 11:55:17 +01001423int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424{
Tejun Heoe525fd82010-11-13 11:55:17 +01001425 struct block_device *whole = NULL;
1426 int res;
1427
1428 WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
1429
1430 if ((mode & FMODE_EXCL) && holder) {
1431 whole = bd_start_claiming(bdev, holder);
1432 if (IS_ERR(whole)) {
1433 bdput(bdev);
1434 return PTR_ERR(whole);
1435 }
1436 }
1437
1438 res = __blkdev_get(bdev, mode, 0);
1439
1440 if (whole) {
Tejun Heod4dc2102011-04-21 20:54:46 +02001441 struct gendisk *disk = whole->bd_disk;
1442
Tejun Heo6a027ef2010-11-13 11:55:17 +01001443 /* finish claiming */
Tejun Heo77ea8872010-12-08 20:57:37 +01001444 mutex_lock(&bdev->bd_mutex);
Tejun Heo6a027ef2010-11-13 11:55:17 +01001445 spin_lock(&bdev_lock);
1446
Tejun Heo77ea8872010-12-08 20:57:37 +01001447 if (!res) {
Tejun Heo6a027ef2010-11-13 11:55:17 +01001448 BUG_ON(!bd_may_claim(bdev, whole, holder));
1449 /*
1450			 * Note that for a whole device, bd_holders
1451			 * is incremented twice, and bd_holder is
1452			 * set to bd_may_claim before being set
1453			 * to holder.
1454 */
1455 whole->bd_holders++;
1456 whole->bd_holder = bd_may_claim;
1457 bdev->bd_holders++;
1458 bdev->bd_holder = holder;
1459 }
1460
1461 /* tell others that we're done */
1462 BUG_ON(whole->bd_claiming != holder);
1463 whole->bd_claiming = NULL;
1464 wake_up_bit(&whole->bd_claiming, 0);
1465
1466 spin_unlock(&bdev_lock);
Tejun Heo77ea8872010-12-08 20:57:37 +01001467
1468 /*
Tejun Heod4dc2102011-04-21 20:54:46 +02001469 * Block event polling for write claims if requested. Any
1470 * write holder makes the write_holder state stick until
1471		 * all are released.  This is good enough because tracking
1472		 * individual writeable references is too fragile given the
1473 * way @mode is used in blkdev_get/put().
Tejun Heo77ea8872010-12-08 20:57:37 +01001474 */
Tejun Heo4c49ff32011-06-01 08:27:41 +02001475 if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
1476 (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
Tejun Heo77ea8872010-12-08 20:57:37 +01001477 bdev->bd_write_holder = true;
Tejun Heod4dc2102011-04-21 20:54:46 +02001478 disk_block_events(disk);
Tejun Heo77ea8872010-12-08 20:57:37 +01001479 }
1480
1481 mutex_unlock(&bdev->bd_mutex);
Tejun Heo6a027ef2010-11-13 11:55:17 +01001482 bdput(whole);
Tejun Heoe525fd82010-11-13 11:55:17 +01001483 }
1484
1485 return res;
NeilBrown37be4122006-12-08 02:36:16 -08001486}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487EXPORT_SYMBOL(blkdev_get);
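
/*
 * Illustrative sketch, not part of the original file: a minimal caller
 * of blkdev_get() with an exclusive claim.  The device number and the
 * holder cookie are assumptions for the example.  Note that on failure
 * blkdev_get() has already dropped the reference bdget() took, so the
 * caller must not bdput() again.
 */
#if 0
static int example_exclusive_open(dev_t devt, void *holder)
{
	struct block_device *bdev = bdget(devt);
	int err;

	if (!bdev)
		return -ENOMEM;

	err = blkdev_get(bdev, FMODE_READ | FMODE_EXCL, holder);
	if (err)
		return err;	/* bdev was already put for us */

	/* ... use the device ... */

	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
	return 0;
}
#endif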
1488
Tejun Heod4d77622010-11-13 11:55:18 +01001489/**
1490 * blkdev_get_by_path - open a block device by name
1491 * @path: path to the block device to open
1492 * @mode: FMODE_* mask
1493 * @holder: exclusive holder identifier
1494 *
1495 * Open the block device described by the device file at @path.  @mode
1496 * and @holder are identical to blkdev_get().
1497 *
1498 * On success, the returned block_device has a reference count of one.
1499 *
1500 * CONTEXT:
1501 * Might sleep.
1502 *
1503 * RETURNS:
1504 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1505 */
1506struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
1507 void *holder)
1508{
1509 struct block_device *bdev;
1510 int err;
1511
1512 bdev = lookup_bdev(path);
1513 if (IS_ERR(bdev))
1514 return bdev;
1515
1516 err = blkdev_get(bdev, mode, holder);
1517 if (err)
1518 return ERR_PTR(err);
1519
Chuck Ebberte51900f2011-02-16 18:11:53 -05001520 if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
1521 blkdev_put(bdev, mode);
1522 return ERR_PTR(-EACCES);
1523 }
1524
Tejun Heod4d77622010-11-13 11:55:18 +01001525 return bdev;
1526}
1527EXPORT_SYMBOL(blkdev_get_by_path);
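
/*
 * Usage sketch (the path string and the use of a superblock as the
 * exclusive holder are assumptions): this is roughly how a filesystem's
 * mount path might open its backing device.
 */
#if 0
static struct block_device *example_open_backing_dev(struct super_block *sb)
{
	/* Released later with blkdev_put(bdev, FMODE_READ | FMODE_WRITE |
	 * FMODE_EXCL), matching the mode used here. */
	return blkdev_get_by_path("/dev/vda1",
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, sb);
}
#endif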
1528
1529/**
1530 * blkdev_get_by_dev - open a block device by device number
1531 * @dev: device number of block device to open
1532 * @mode: FMODE_* mask
1533 * @holder: exclusive holder identifier
1534 *
1535 * Open the block device described by device number @dev.  @mode and
1536 * @holder are identical to blkdev_get().
1537 *
1538 * Use it ONLY if you really do not have anything better - i.e. when
1539 * you are behind a truly sucky interface and all you are given is a
1540 * device number. _Never_ to be used for internal purposes. If you
1541 * ever need it - reconsider your API.
1542 *
1543 * On success, the returned block_device has a reference count of one.
1544 *
1545 * CONTEXT:
1546 * Might sleep.
1547 *
1548 * RETURNS:
1549 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1550 */
1551struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
1552{
1553 struct block_device *bdev;
1554 int err;
1555
1556 bdev = bdget(dev);
1557 if (!bdev)
1558 return ERR_PTR(-ENOMEM);
1559
1560 err = blkdev_get(bdev, mode, holder);
1561 if (err)
1562 return ERR_PTR(err);
1563
1564 return bdev;
1565}
1566EXPORT_SYMBOL(blkdev_get_by_dev);
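
/*
 * Hypothetical example only, per the warning above: opening the first
 * partition of the first sd disk by device number.  The MKDEV() values
 * are illustrative.
 */
#if 0
static int example_open_by_devt(void)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_dev(MKDEV(8, 1), FMODE_READ, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... read-only access ... */
	blkdev_put(bdev, FMODE_READ);
	return 0;
}
#endif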
1567
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568static int blkdev_open(struct inode * inode, struct file * filp)
1569{
1570 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
1572 /*
1573 * Preserve backwards compatibility and allow large file access
1574 * even if userspace doesn't ask for it explicitly. Some mkfs
1575	 * binaries need it. We might want to drop this workaround
1576 * during an unstable branch.
1577 */
1578 filp->f_flags |= O_LARGEFILE;
1579
Al Viro572c4892007-10-08 13:24:05 -04001580 if (filp->f_flags & O_NDELAY)
1581 filp->f_mode |= FMODE_NDELAY;
1582 if (filp->f_flags & O_EXCL)
1583 filp->f_mode |= FMODE_EXCL;
1584 if ((filp->f_flags & O_ACCMODE) == 3)
1585 filp->f_mode |= FMODE_WRITE_IOCTL;
1586
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 bdev = bd_acquire(inode);
Pavel Emelianov6a2aae02006-10-28 10:38:33 -07001588 if (bdev == NULL)
1589 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
Al Viro572c4892007-10-08 13:24:05 -04001591 filp->f_mapping = bdev->bd_inode->i_mapping;
1592
Tejun Heoe525fd82010-11-13 11:55:17 +01001593 return blkdev_get(bdev, filp->f_mode, filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594}
1595
Al Viro4385bab2013-05-05 22:11:03 -04001596static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001597{
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001598 struct gendisk *disk = bdev->bd_disk;
NeilBrown37be4122006-12-08 02:36:16 -08001599 struct block_device *victim = NULL;
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001600
NeilBrown6796bf52006-12-08 02:36:16 -08001601 mutex_lock_nested(&bdev->bd_mutex, for_part);
NeilBrown37be4122006-12-08 02:36:16 -08001602 if (for_part)
1603 bdev->bd_part_count--;
1604
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001605 if (!--bdev->bd_openers) {
Tejun Heo6a027ef2010-11-13 11:55:17 +01001606 WARN_ON_ONCE(bdev->bd_holders);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001607 sync_blockdev(bdev);
1608 kill_bdev(bdev);
Ilya Dryomov43d1c0e2015-11-20 22:22:34 +01001609
Vivek Goyaldbd3ca52015-11-09 09:23:40 -07001610 bdev_write_inode(bdev);
Ilya Dryomov43d1c0e2015-11-20 22:22:34 +01001611 /*
1612 * Detaching bdev inode from its wb in __destroy_inode()
1613 * is too late: the queue which embeds its bdi (along with
1614 * root wb) can be gone as soon as we put_disk() below.
1615 */
1616 inode_detach_wb(bdev->bd_inode);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001617 }
1618 if (bdev->bd_contains == bdev) {
1619 if (disk->fops->release)
Al Virodb2a1442013-05-05 21:52:57 -04001620 disk->fops->release(disk, mode);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001621 }
1622 if (!bdev->bd_openers) {
1623 struct module *owner = disk->fops->owner;
1624
Tejun Heo0762b8b2008-08-25 19:56:12 +09001625 disk_put_part(bdev->bd_part);
1626 bdev->bd_part = NULL;
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001627 bdev->bd_disk = NULL;
NeilBrown37be4122006-12-08 02:36:16 -08001628 if (bdev != bdev->bd_contains)
1629 victim = bdev->bd_contains;
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001630 bdev->bd_contains = NULL;
Tejun Heo523e1d32011-10-19 14:31:07 +02001631
1632 put_disk(disk);
1633 module_put(owner);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001634 }
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001635 mutex_unlock(&bdev->bd_mutex);
1636 bdput(bdev);
NeilBrown37be4122006-12-08 02:36:16 -08001637 if (victim)
Al Viro9a1c3542008-02-22 20:40:24 -05001638 __blkdev_put(victim, mode, 1);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001639}
1640
Al Viro4385bab2013-05-05 22:11:03 -04001641void blkdev_put(struct block_device *bdev, fmode_t mode)
NeilBrown37be4122006-12-08 02:36:16 -08001642{
Tejun Heo85ef06d2011-07-01 16:17:47 +02001643 mutex_lock(&bdev->bd_mutex);
1644
Tejun Heoe525fd82010-11-13 11:55:17 +01001645 if (mode & FMODE_EXCL) {
Tejun Heo6a027ef2010-11-13 11:55:17 +01001646 bool bdev_free;
1647
1648 /*
1649 * Release a claim on the device. The holder fields
1650		 * are protected by bdev_lock; bd_mutex is used to
1651		 * synchronize disk_holder unlinking.
1652 */
Tejun Heo6a027ef2010-11-13 11:55:17 +01001653 spin_lock(&bdev_lock);
1654
1655 WARN_ON_ONCE(--bdev->bd_holders < 0);
1656 WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
1657
1658 /* bd_contains might point to self, check in a separate step */
1659 if ((bdev_free = !bdev->bd_holders))
1660 bdev->bd_holder = NULL;
1661 if (!bdev->bd_contains->bd_holders)
1662 bdev->bd_contains->bd_holder = NULL;
1663
1664 spin_unlock(&bdev_lock);
1665
Tejun Heo77ea8872010-12-08 20:57:37 +01001666 /*
1667		 * If this was the last claim, remove the holder link and
1668		 * unblock event polling if it was a write holder.
1669 */
Tejun Heo85ef06d2011-07-01 16:17:47 +02001670 if (bdev_free && bdev->bd_write_holder) {
1671 disk_unblock_events(bdev->bd_disk);
1672 bdev->bd_write_holder = false;
Tejun Heo77ea8872010-12-08 20:57:37 +01001673 }
Tejun Heo69362172011-03-09 19:54:27 +01001674 }
Tejun Heo77ea8872010-12-08 20:57:37 +01001675
Tejun Heo85ef06d2011-07-01 16:17:47 +02001676 /*
1677 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
1678 * event. This is to ensure detection of media removal commanded
1679 * from userland - e.g. eject(1).
1680 */
1681 disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
1682
1683 mutex_unlock(&bdev->bd_mutex);
1684
Al Viro4385bab2013-05-05 22:11:03 -04001685 __blkdev_put(bdev, mode, 0);
NeilBrown37be4122006-12-08 02:36:16 -08001686}
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001687EXPORT_SYMBOL(blkdev_put);
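
/*
 * Pairing sketch (not from the original file): the mode passed to
 * blkdev_put() must include FMODE_EXCL if and only if the matching
 * blkdev_get() claimed the device exclusively, since that bit drives
 * the holder bookkeeping above.  The path and holder are assumptions.
 */
#if 0
static int example_claim_and_release(const char *path, void *holder)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path, FMODE_READ | FMODE_EXCL, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... exclusive access ... */
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);	/* same mode bits */
	return 0;
}
#endif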
1688
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689static int blkdev_close(struct inode * inode, struct file * filp)
1690{
Dan Williams4ebb16c2015-10-28 07:48:19 +09001691 struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
Al Viro4385bab2013-05-05 22:11:03 -04001692 blkdev_put(bdev, filp->f_mode);
1693 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694}
1695
Arnd Bergmannbb93e3a2005-06-23 00:10:15 -07001696static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697{
Dan Williams4ebb16c2015-10-28 07:48:19 +09001698 struct block_device *bdev = I_BDEV(bdev_file_inode(file));
Al Viro56b26ad2008-09-19 03:17:36 -04001699 fmode_t mode = file->f_mode;
Christoph Hellwigfd4ce1a2008-11-05 14:58:42 +01001700
1701 /*
1702 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
1703	 * to update it before every ioctl.
1704 */
Al Viro56b26ad2008-09-19 03:17:36 -04001705 if (file->f_flags & O_NDELAY)
Christoph Hellwigfd4ce1a2008-11-05 14:58:42 +01001706 mode |= FMODE_NDELAY;
1707 else
1708 mode &= ~FMODE_NDELAY;
1709
Al Viro56b26ad2008-09-19 03:17:36 -04001710 return blkdev_ioctl(bdev, mode, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711}
1712
Theodore Ts'o87d8fe12009-01-03 09:47:09 -05001713/*
Christoph Hellwigeef99382009-08-20 17:43:41 +02001714 * Write data to the block device. Only intended for the block device itself
1715 * and the raw driver, which is basically a fake block device.
1716 *
1717 * Does not take i_mutex for the write and thus is not for general purpose
1718 * use.
1719 */
Al Viro1456c0a2014-04-03 03:21:50 -04001720ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
Christoph Hellwigeef99382009-08-20 17:43:41 +02001721{
1722 struct file *file = iocb->ki_filp;
Dan Williams4ebb16c2015-10-28 07:48:19 +09001723 struct inode *bd_inode = bdev_file_inode(file);
Al Viro7ec7b942015-04-07 11:35:14 -04001724 loff_t size = i_size_read(bd_inode);
Jianpeng Ma53362a02012-08-02 09:50:39 +02001725 struct blk_plug plug;
Christoph Hellwigeef99382009-08-20 17:43:41 +02001726 ssize_t ret;
Al Viro5f380c72015-04-07 11:28:12 -04001727
Al Viro7ec7b942015-04-07 11:35:14 -04001728 if (bdev_read_only(I_BDEV(bd_inode)))
1729 return -EPERM;
Al Viro5f380c72015-04-07 11:28:12 -04001730
Al Viro7ec7b942015-04-07 11:35:14 -04001731 if (!iov_iter_count(from))
Al Viro5f380c72015-04-07 11:28:12 -04001732 return 0;
1733
Al Viro7ec7b942015-04-07 11:35:14 -04001734 if (iocb->ki_pos >= size)
1735 return -ENOSPC;
1736
1737 iov_iter_truncate(from, size - iocb->ki_pos);
Christoph Hellwigeef99382009-08-20 17:43:41 +02001738
Jianpeng Ma53362a02012-08-02 09:50:39 +02001739 blk_start_plug(&plug);
Al Viro1456c0a2014-04-03 03:21:50 -04001740 ret = __generic_file_write_iter(iocb, from);
Christoph Hellwige2592212016-04-07 08:52:01 -07001741 if (ret > 0)
1742 ret = generic_write_sync(iocb, ret);
Jianpeng Ma53362a02012-08-02 09:50:39 +02001743 blk_finish_plug(&plug);
Christoph Hellwigeef99382009-08-20 17:43:41 +02001744 return ret;
1745}
Al Viro1456c0a2014-04-03 03:21:50 -04001746EXPORT_SYMBOL_GPL(blkdev_write_iter);
Christoph Hellwigeef99382009-08-20 17:43:41 +02001747
David Jefferyb2de5252014-09-29 10:21:10 -04001748ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001749{
1750 struct file *file = iocb->ki_filp;
Dan Williams4ebb16c2015-10-28 07:48:19 +09001751 struct inode *bd_inode = bdev_file_inode(file);
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001752 loff_t size = i_size_read(bd_inode);
Al Viroa8860382014-04-02 20:02:21 -04001753 loff_t pos = iocb->ki_pos;
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001754
1755 if (pos >= size)
1756 return 0;
1757
1758 size -= pos;
Al Viroa8860382014-04-02 20:02:21 -04001759 iov_iter_truncate(to, size);
1760 return generic_file_read_iter(iocb, to);
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001761}
David Jefferyb2de5252014-09-29 10:21:10 -04001762EXPORT_SYMBOL_GPL(blkdev_read_iter);
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001763
Christoph Hellwigeef99382009-08-20 17:43:41 +02001764/*
Theodore Ts'o87d8fe12009-01-03 09:47:09 -05001765 * Try to release a page associated with block device when the system
1766 * is under memory pressure.
1767 */
1768static int blkdev_releasepage(struct page *page, gfp_t wait)
1769{
1770 struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
1771
1772 if (super && super->s_op->bdev_try_to_free_page)
1773 return super->s_op->bdev_try_to_free_page(super, page, wait);
1774
1775 return try_to_free_buffers(page);
1776}
1777
Ross Zwisler7f6d5b52016-02-26 15:19:55 -08001778static int blkdev_writepages(struct address_space *mapping,
1779 struct writeback_control *wbc)
1780{
1781 if (dax_mapping(mapping)) {
1782 struct block_device *bdev = I_BDEV(mapping->host);
1783
1784 return dax_writeback_mapping_range(mapping, bdev, wbc);
1785 }
1786 return generic_writepages(mapping, wbc);
1787}
1788
Adrian Bunk4c54ac62008-02-18 13:48:31 +01001789static const struct address_space_operations def_blk_aops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 .readpage = blkdev_readpage,
Akinobu Mita447f05b2014-10-09 15:26:58 -07001791 .readpages = blkdev_readpages,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 .writepage = blkdev_writepage,
Nick Piggin6272b5a2007-10-16 01:25:04 -07001793 .write_begin = blkdev_write_begin,
1794 .write_end = blkdev_write_end,
Ross Zwisler7f6d5b52016-02-26 15:19:55 -08001795 .writepages = blkdev_writepages,
Theodore Ts'o87d8fe12009-01-03 09:47:09 -05001796 .releasepage = blkdev_releasepage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 .direct_IO = blkdev_direct_IO,
Mel Gormanb4597222013-07-03 15:02:05 -07001798 .is_dirty_writeback = buffer_check_dirty_writeback,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799};
1800
Darrick J. Wong25f4c412016-10-11 13:51:11 -07001801#define BLKDEV_FALLOC_FL_SUPPORTED \
1802 (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
1803 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
1804
1805static long blkdev_fallocate(struct file *file, int mode, loff_t start,
1806 loff_t len)
1807{
1808 struct block_device *bdev = I_BDEV(bdev_file_inode(file));
1809 struct request_queue *q = bdev_get_queue(bdev);
1810 struct address_space *mapping;
1811 loff_t end = start + len - 1;
1812 loff_t isize;
1813 int error;
1814
1815 /* Fail if we don't recognize the flags. */
1816 if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
1817 return -EOPNOTSUPP;
1818
1819 /* Don't go off the end of the device. */
1820 isize = i_size_read(bdev->bd_inode);
1821 if (start >= isize)
1822 return -EINVAL;
1823 if (end >= isize) {
1824 if (mode & FALLOC_FL_KEEP_SIZE) {
1825 len = isize - start;
1826 end = start + len - 1;
1827 } else
1828 return -EINVAL;
1829 }
1830
1831 /*
1832 * Don't allow IO that isn't aligned to logical block size.
1833 */
1834 if ((start | len) & (bdev_logical_block_size(bdev) - 1))
1835 return -EINVAL;
1836
1837 /* Invalidate the page cache, including dirty pages. */
1838 mapping = bdev->bd_inode->i_mapping;
1839 truncate_inode_pages_range(mapping, start, end);
1840
1841 switch (mode) {
1842 case FALLOC_FL_ZERO_RANGE:
1843 case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
1844 error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
1845 GFP_KERNEL, false);
1846 break;
1847 case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
1848 /* Only punch if the device can do zeroing discard. */
1849 if (!blk_queue_discard(q) || !q->limits.discard_zeroes_data)
1850 return -EOPNOTSUPP;
1851 error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
1852 GFP_KERNEL, 0);
1853 break;
1854 case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
1855 if (!blk_queue_discard(q))
1856 return -EOPNOTSUPP;
1857 error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
1858 GFP_KERNEL, 0);
1859 break;
1860 default:
1861 return -EOPNOTSUPP;
1862 }
1863 if (error)
1864 return error;
1865
1866 /*
1867 * Invalidate again; if someone wandered in and dirtied a page,
1868 * the caller will be given -EBUSY. The third argument is
1869 * inclusive, so the rounding here is safe.
1870 */
1871 return invalidate_inode_pages2_range(mapping,
1872 start >> PAGE_SHIFT,
1873 end >> PAGE_SHIFT);
1874}
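
/*
 * Userspace-side sketch (the device path is an assumption, and the
 * device must support discard): punching a hole on a block device with
 * fallocate(2) ends up in blkdev_fallocate() above.  Offsets and
 * lengths must be aligned to the logical block size or the call fails
 * with -EINVAL.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <linux/falloc.h>

int discard_first_mib(const char *dev)	/* e.g. "/dev/vdb" */
{
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	/* Discard the first 1 MiB while keeping the device size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 1 << 20) != 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif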
1875
Arjan van de Ven4b6f5d22006-03-28 01:56:42 -08001876const struct file_operations def_blk_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 .open = blkdev_open,
1878 .release = blkdev_close,
1879 .llseek = block_llseek,
Al Viroa8860382014-04-02 20:02:21 -04001880 .read_iter = blkdev_read_iter,
Al Viro1456c0a2014-04-03 03:21:50 -04001881 .write_iter = blkdev_write_iter,
Dan Williamsacc93d32016-05-07 11:40:28 -07001882 .mmap = generic_file_mmap,
Andrew Mortonb1dd3b22010-04-06 14:35:00 -07001883 .fsync = blkdev_fsync,
Arnd Bergmannbb93e3a2005-06-23 00:10:15 -07001884 .unlocked_ioctl = block_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885#ifdef CONFIG_COMPAT
1886 .compat_ioctl = compat_blkdev_ioctl,
1887#endif
Linus Torvalds1e8b3332012-11-29 10:49:50 -08001888 .splice_read = generic_file_splice_read,
Al Viro8d020762014-04-05 04:27:08 -04001889 .splice_write = iter_file_splice_write,
Darrick J. Wong25f4c412016-10-11 13:51:11 -07001890 .fallocate = blkdev_fallocate,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891};
1892
1893int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
1894{
1895 int res;
1896 mm_segment_t old_fs = get_fs();
1897 set_fs(KERNEL_DS);
Al Viro56b26ad2008-09-19 03:17:36 -04001898 res = blkdev_ioctl(bdev, 0, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 set_fs(old_fs);
1900 return res;
1901}
1902
1903EXPORT_SYMBOL(ioctl_by_bdev);
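
/*
 * Sketch of an in-kernel caller (the BLKFLSBUF request is just an
 * example): ioctl_by_bdev() lets kernel code issue a block ioctl with
 * a kernel-space argument by temporarily switching to KERNEL_DS.
 */
#if 0
static int example_flush(struct block_device *bdev)
{
	return ioctl_by_bdev(bdev, BLKFLSBUF, 0);	/* flush buffer cache */
}
#endif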
1904
1905/**
1906 * lookup_bdev - lookup a struct block_device by name
Randy Dunlap94e29592009-01-06 14:41:15 -08001907 * @pathname: special file representing the block device
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 *
Randy Dunlap57d1b532008-10-09 10:42:38 +02001909 * Get a reference to the blockdevice at @pathname in the current
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 * namespace if possible and return it. Return ERR_PTR(error)
1911 * otherwise.
1912 */
Al Viro421748e2008-08-02 01:04:36 -04001913struct block_device *lookup_bdev(const char *pathname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914{
1915 struct block_device *bdev;
1916 struct inode *inode;
Al Viro421748e2008-08-02 01:04:36 -04001917 struct path path;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 int error;
1919
Al Viro421748e2008-08-02 01:04:36 -04001920 if (!pathname || !*pathname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 return ERR_PTR(-EINVAL);
1922
Al Viro421748e2008-08-02 01:04:36 -04001923 error = kern_path(pathname, LOOKUP_FOLLOW, &path);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 if (error)
1925 return ERR_PTR(error);
1926
David Howellsbb6687342015-03-17 22:26:21 +00001927 inode = d_backing_inode(path.dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 error = -ENOTBLK;
1929 if (!S_ISBLK(inode->i_mode))
1930 goto fail;
1931 error = -EACCES;
Eric W. Biedermana2982cc2016-06-09 15:34:02 -05001932 if (!may_open_dev(&path))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 goto fail;
1934 error = -ENOMEM;
1935 bdev = bd_acquire(inode);
1936 if (!bdev)
1937 goto fail;
1938out:
Al Viro421748e2008-08-02 01:04:36 -04001939 path_put(&path);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 return bdev;
1941fail:
1942 bdev = ERR_PTR(error);
1943 goto out;
1944}
Al Virod5686b42008-08-01 05:00:11 -04001945EXPORT_SYMBOL(lookup_bdev);
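
/*
 * Usage sketch (the helper name is hypothetical): resolving a path to
 * its device number.  lookup_bdev() returns a referenced block_device,
 * so the caller drops the reference with bdput() when done.
 */
#if 0
static dev_t example_path_to_devt(const char *path)
{
	struct block_device *bdev = lookup_bdev(path);
	dev_t devt;

	if (IS_ERR(bdev))
		return 0;
	devt = bdev->bd_dev;
	bdput(bdev);	/* drop the reference lookup_bdev() took */
	return devt;
}
#endif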
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946
NeilBrown93b270f2011-02-24 17:25:47 +11001947int __invalidate_device(struct block_device *bdev, bool kill_dirty)
David Howellsb71e8a42006-08-29 19:06:11 +01001948{
1949 struct super_block *sb = get_super(bdev);
1950 int res = 0;
1951
1952 if (sb) {
1953 /*
1954 * no need to lock the super, get_super holds the
1955 * read mutex so the filesystem cannot go away
1956 * under us (->put_super runs with the write lock
1957	 * held).
1958 */
1959 shrink_dcache_sb(sb);
NeilBrown93b270f2011-02-24 17:25:47 +11001960 res = invalidate_inodes(sb, kill_dirty);
David Howellsb71e8a42006-08-29 19:06:11 +01001961 drop_super(sb);
1962 }
Peter Zijlstraf98393a2007-05-06 14:49:54 -07001963 invalidate_bdev(bdev);
David Howellsb71e8a42006-08-29 19:06:11 +01001964 return res;
1965}
1966EXPORT_SYMBOL(__invalidate_device);
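
/*
 * Illustrative call (hypothetical driver context): a media-change
 * handler might drop all cached state for the device while keeping
 * dirty inodes (kill_dirty = false).  A nonzero return means some
 * inodes were busy and could not be invalidated.
 */
#if 0
static void example_media_change(struct block_device *bdev)
{
	if (__invalidate_device(bdev, false))
		pr_warn("bdev: some inodes remained busy\n");
}
#endif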
Jan Kara5c0d6b62012-07-03 16:45:31 +02001967
1968void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
1969{
1970 struct inode *inode, *old_inode = NULL;
1971
Dave Chinner74278da2015-03-04 12:37:22 -05001972 spin_lock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02001973 list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
1974 struct address_space *mapping = inode->i_mapping;
Rabin Vincent11aa5c12016-12-01 09:18:28 +01001975 struct block_device *bdev;
Jan Kara5c0d6b62012-07-03 16:45:31 +02001976
1977 spin_lock(&inode->i_lock);
1978 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
1979 mapping->nrpages == 0) {
1980 spin_unlock(&inode->i_lock);
1981 continue;
1982 }
1983 __iget(inode);
1984 spin_unlock(&inode->i_lock);
Dave Chinner74278da2015-03-04 12:37:22 -05001985 spin_unlock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02001986 /*
1987 * We hold a reference to 'inode' so it couldn't have been
1988		 * removed from the s_inodes list while we dropped the
Dave Chinner74278da2015-03-04 12:37:22 -05001989		 * s_inode_list_lock.  We cannot iput the inode now as we may
Jan Kara5c0d6b62012-07-03 16:45:31 +02001990		 * be holding the last reference and we cannot iput it under
Dave Chinner74278da2015-03-04 12:37:22 -05001991 * s_inode_list_lock. So we keep the reference and iput it
Jan Kara5c0d6b62012-07-03 16:45:31 +02001992 * later.
1993 */
1994 iput(old_inode);
1995 old_inode = inode;
Rabin Vincent11aa5c12016-12-01 09:18:28 +01001996 bdev = I_BDEV(inode);
Jan Kara5c0d6b62012-07-03 16:45:31 +02001997
Rabin Vincent11aa5c12016-12-01 09:18:28 +01001998 mutex_lock(&bdev->bd_mutex);
1999 if (bdev->bd_openers)
2000 func(bdev, arg);
2001 mutex_unlock(&bdev->bd_mutex);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002002
Dave Chinner74278da2015-03-04 12:37:22 -05002003 spin_lock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002004 }
Dave Chinner74278da2015-03-04 12:37:22 -05002005 spin_unlock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002006 iput(old_inode);
2007}
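
/*
 * Callback sketch (the counting helper is hypothetical; the in-tree
 * user of this iterator is the sync path): iterate_bdevs() invokes the
 * callback under bd_mutex for every currently open block device.
 */
#if 0
static void example_count_open(struct block_device *bdev, void *arg)
{
	(*(unsigned int *)arg)++;
}

static unsigned int example_open_bdev_count(void)
{
	unsigned int n = 0;

	iterate_bdevs(example_count_open, &n);
	return n;
}
#endif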