/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/dax.h>
#include <linux/badblocks.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

void __vfs_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk_ratelimited("%sVFS (%s): %pV\n", prefix, sb->s_id, &vaf);
        va_end(args);
}

static void bdev_write_inode(struct block_device *bdev)
{
        struct inode *inode = bdev->bd_inode;
        int ret;

        spin_lock(&inode->i_lock);
        while (inode->i_state & I_DIRTY) {
                spin_unlock(&inode->i_lock);
                ret = write_inode_now(inode, true);
                if (ret) {
                        char name[BDEVNAME_SIZE];
                        pr_warn_ratelimited("VFS: Dirty inode writeback failed "
                                            "for block device %s (err=%d).\n",
                                            bdevname(bdev, name), ret);
                }
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and page cache, dirty or not. */
void kill_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
                return;

        invalidate_bh_lrus();
        truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(kill_bdev);

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        lru_add_drain_all();    /* make sure all lru add caches are flushed */
        invalidate_mapping_pages(mapping, 0, -1);
        /* 99% of the time, we don't need to flush the cleancache on the bdev.
         * But, for the strange corners, let's be cautious.
         */
        cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);
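
/*
 * Illustrative usage (not part of the original file): a removable-media
 * driver would typically drop the clean cached pages for a whole device
 * after a media change, before re-reading it.  The helper name below is
 * hypothetical:
 *
 *      static void my_media_changed(struct block_device *bdev)
 *      {
 *              invalidate_bdev(bdev);
 *      }
 *
 * kill_bdev() above, by contrast, truncates _all_ of the device's page
 * cache, dirty or not, and is reserved for heavier transitions such as
 * the block size change in set_blocksize() below.
 */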

int set_blocksize(struct block_device *bdev, int size)
{
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_logical_block_size(bdev))
                return -EINVAL;

        /* Don't change the size if it is same as current */
        if (bdev->bd_block_size != size) {
                sync_blockdev(bdev);
                bdev->bd_block_size = size;
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
        return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /* If we get here, we know size is a power of two
         * and its value is between 512 and PAGE_SIZE */
        sb->s_blocksize = size;
        sb->s_blocksize_bits = blksize_bits(size);
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_logical_block_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
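
/*
 * Illustrative usage (not part of the original file): a filesystem's
 * fill_super typically negotiates its block size through these helpers,
 * asking for a preferred size and letting the device round it up:
 *
 *      static int myfs_fill_super(struct super_block *sb, void *data,
 *                                 int silent)
 *      {
 *              int blocksize = sb_min_blocksize(sb, 1024);
 *
 *              if (!blocksize)
 *                      return -EINVAL;
 *              return 0;
 *      }
 *
 * sb_min_blocksize() returns 0 when the device cannot support the
 * requested size; "myfs_fill_super" is a hypothetical name, but ext2,
 * fat and others follow this pattern.
 */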

static int
blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}

static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = bdev_file_inode(file);

        if (IS_DAX(inode))
                return dax_do_io(iocb, inode, iter, blkdev_get_block,
                                NULL, DIO_SKIP_DIO_COUNT);
        return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter,
                                    blkdev_get_block, NULL, NULL,
                                    DIO_SKIP_DIO_COUNT);
}

int __sync_blockdev(struct block_device *bdev, int wait)
{
        if (!bdev)
                return 0;
        if (!wait)
                return filemap_flush(bdev->bd_inode->i_mapping);
        return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = sync_filesystem(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev -- lock a filesystem and force it into a consistent state
 * @bdev:       blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when
 * multiple freeze requests arrive simultaneously.  It is incremented by
 * freeze_bdev() and decremented by thaw_bdev(); when it drops to 0,
 * thaw_bdev() actually unfreezes the filesystem.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (++bdev->bd_fsfreeze_count > 1) {
                /*
                 * We don't even need to grab a reference - the first call
                 * to freeze_bdev grabs an active reference and only the last
                 * thaw_bdev drops it.
                 */
                sb = get_super(bdev);
                drop_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }

        sb = get_active_super(bdev);
        if (!sb)
                goto out;
        if (sb->s_op->freeze_super)
                error = sb->s_op->freeze_super(sb);
        else
                error = freeze_super(sb);
        if (error) {
                deactivate_super(sb);
                bdev->bd_fsfreeze_count--;
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return ERR_PTR(error);
        }
        deactivate_super(sb);
 out:
        sync_blockdev(bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return sb;      /* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev -- unlock filesystem
 * @bdev:       blockdevice to unlock
 * @sb:         associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        int error = -EINVAL;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (!bdev->bd_fsfreeze_count)
                goto out;

        error = 0;
        if (--bdev->bd_fsfreeze_count > 0)
                goto out;

        if (!sb)
                goto out;

        if (sb->s_op->thaw_super)
                error = sb->s_op->thaw_super(sb);
        else
                error = thaw_super(sb);
        if (error) {
                bdev->bd_fsfreeze_count++;
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return error;
        }
out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return 0;
}
EXPORT_SYMBOL(thaw_bdev);
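
/*
 * Illustrative usage (not part of the original file): a snapshot
 * implementation brackets its work with the freeze/thaw pair.  Both
 * helper names below are hypothetical:
 *
 *      static int my_snapshot(struct block_device *bdev)
 *      {
 *              struct super_block *sb = freeze_bdev(bdev);
 *
 *              if (IS_ERR(sb))
 *                      return PTR_ERR(sb);
 *              my_copy_out_device(bdev);
 *              return thaw_bdev(bdev, sb);
 *      }
 *
 * freeze_bdev() returns NULL when no filesystem is mounted on the
 * device, and thaw_bdev() accepts that NULL and just drops the freeze
 * count, so the pairing above works either way.
 */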

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_readpages(struct file *file, struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        int ret;
        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}

/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
        struct inode *bd_inode = bdev_file_inode(filp);
        struct block_device *bdev = I_BDEV(bd_inode);
        int error;

        error = filemap_write_and_wait_range(filp->f_mapping, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}
EXPORT_SYMBOL(blkdev_fsync);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page from (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
                        struct page *page)
{
        const struct block_device_operations *ops = bdev->bd_disk->fops;
        int result = -EOPNOTSUPP;

        if (!ops->rw_page || bdev_get_integrity(bdev))
                return result;

        result = blk_queue_enter(bdev->bd_queue, false);
        if (result)
                return result;
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
        blk_queue_exit(bdev->bd_queue);
        return result;
}
EXPORT_SYMBOL_GPL(bdev_read_page);
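
/*
 * Illustrative caller pattern (not part of the original file): because
 * failures here are "soft", a read path first tries the rw_page fast
 * path and only builds a bio when it is unavailable.  A sketch, with
 * my_submit_read_bio() as a hypothetical fallback:
 *
 *      static void my_read_page(struct block_device *bdev, sector_t sector,
 *                               struct page *page)
 *      {
 *              if (bdev_read_page(bdev, sector, page) == 0)
 *                      return;
 *              my_submit_read_bio(bdev, sector, page);
 *      }
 *
 * This mirrors how do_mpage_readpage() and the swap-in path use it.
 */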

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
                        struct page *page, struct writeback_control *wbc)
{
        int result;
        const struct block_device_operations *ops = bdev->bd_disk->fops;

        if (!ops->rw_page || bdev_get_integrity(bdev))
                return -EOPNOTSUPP;
        result = blk_queue_enter(bdev->bd_queue, false);
        if (result)
                return result;

        set_page_writeback(page);
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
        if (result)
                end_page_writeback(page);
        else
                unlock_page(page);
        blk_queue_exit(bdev->bd_queue);
        return result;
}
EXPORT_SYMBOL_GPL(bdev_write_page);
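
/*
 * Illustrative caller pattern (not part of the original file): per the
 * comment above, when submission fails the page is still locked, so a
 * ->writepage implementation either unlocks it or hands it to a
 * bio-based fallback.  A sketch, with the my_*() helpers as
 * hypothetical names:
 *
 *      static int my_writepage(struct page *page,
 *                              struct writeback_control *wbc)
 *      {
 *              struct block_device *bdev = my_page_bdev(page);
 *              sector_t sector = my_page_sector(page);
 *
 *              if (bdev_write_page(bdev, sector, page, wbc) == 0)
 *                      return 0;
 *              return my_submit_write_bio(bdev, sector, page, wbc);
 *      }
 *
 * The swap-out path in mm/page_io.c follows this shape.
 */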

/**
 * bdev_direct_access() - Get the address for directly-accessible memory
 * @bdev: The device containing the memory
 * @dax: control and output parameters for ->direct_access
 *
 * If a block device is made up of directly addressable memory, this function
 * will tell the caller the PFN and the address of the memory.  The address
 * may be directly dereferenced within the kernel without the need to call
 * ioremap(), kmap() or similar.  The PFN is suitable for inserting into
 * page tables.
 *
 * Return: negative errno if an error occurs, otherwise the number of bytes
 * accessible at this address.
 */
long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
{
        sector_t sector = dax->sector;
        long avail, size = dax->size;
        const struct block_device_operations *ops = bdev->bd_disk->fops;

        /*
         * The device driver is allowed to sleep, in order to make the
         * memory directly accessible.
         */
        might_sleep();

        if (size < 0)
                return size;
        if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
                return -EOPNOTSUPP;
        if ((sector + DIV_ROUND_UP(size, 512)) >
                                        part_nr_sects_read(bdev->bd_part))
                return -ERANGE;
        sector += get_start_sect(bdev);
        if (sector % (PAGE_SIZE / 512))
                return -EINVAL;
        avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
        if (!avail)
                return -ERANGE;
        if (avail > 0 && avail & ~PAGE_MASK)
                return -ENXIO;
        return min(avail, size);
}
EXPORT_SYMBOL_GPL(bdev_direct_access);
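
/*
 * Illustrative usage (not part of the original file): callers fill in
 * the sector and size and receive the kernel virtual address and PFN
 * back through the same struct.  my_zero_block() is a hypothetical
 * name:
 *
 *      static long my_zero_block(struct block_device *bdev, sector_t sector)
 *      {
 *              struct blk_dax_ctl dax = {
 *                      .sector = sector,
 *                      .size = PAGE_SIZE,
 *              };
 *              long rc = bdev_direct_access(bdev, &dax);
 *
 *              if (rc < 0)
 *                      return rc;
 *              memset(dax.addr, 0, PAGE_SIZE);
 *              return rc;
 *      }
 *
 * fs/dax.c uses the interface in essentially this way.
 */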

/**
 * bdev_dax_supported() - Check if the device supports dax for filesystem
 * @sb: The superblock of the device
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: negative errno if unsupported, 0 if supported.
 */
int bdev_dax_supported(struct super_block *sb, int blocksize)
{
        struct blk_dax_ctl dax = {
                .sector = 0,
                .size = PAGE_SIZE,
        };
        int err;

        if (blocksize != PAGE_SIZE) {
                vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
                return -EINVAL;
        }

        err = bdev_direct_access(sb->s_bdev, &dax);
        if (err < 0) {
                switch (err) {
                case -EOPNOTSUPP:
                        vfs_msg(sb, KERN_ERR,
                                "error: device does not support dax");
                        break;
                case -EINVAL:
                        vfs_msg(sb, KERN_ERR,
                                "error: unaligned partition for dax");
                        break;
                default:
                        vfs_msg(sb, KERN_ERR,
                                "error: dax access failed (%d)", err);
                }
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(bdev_dax_supported);
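
/*
 * Illustrative usage (not part of the original file): a filesystem
 * honouring a "-o dax" mount option gates it on this check during
 * mount, roughly:
 *
 *      if (myfs_test_opt(sb, DAX)) {
 *              err = bdev_dax_supported(sb, sb->s_blocksize);
 *              if (err)
 *                      return err;
 *      }
 *
 * myfs_test_opt() stands in for the filesystem's own option test; ext4
 * and xfs perform this kind of check in their mount paths.
 */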

/**
 * bdev_dax_capable() - Return if the raw device is capable for dax
 * @bdev: The device for raw block device access
 */
bool bdev_dax_capable(struct block_device *bdev)
{
        struct blk_dax_ctl dax = {
                .size = PAGE_SIZE,
        };

        if (!IS_ENABLED(CONFIG_FS_DAX))
                return false;

        dax.sector = 0;
        if (bdev_direct_access(bdev, &dax) < 0)
                return false;

        dax.sector = bdev->bd_part->nr_sects - (PAGE_SIZE / 512);
        if (bdev_direct_access(bdev, &dax) < 0)
                return false;

        return true;
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void bdev_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct bdev_inode *bdi = BDEV_I(inode);

        kmem_cache_free(bdev_cachep, bdi);
}

static void bdev_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, bdev_i_callback);
}

static void init_once(void *foo)
{
        struct bdev_inode *ei = (struct bdev_inode *) foo;
        struct block_device *bdev = &ei->bdev;

        memset(bdev, 0, sizeof(*bdev));
        mutex_init(&bdev->bd_mutex);
        INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
        INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
        inode_init_once(&ei->vfs_inode);
        /* Initialize mutex for freeze. */
        mutex_init(&bdev->bd_fsfreeze_mutex);
}

static void bdev_evict_inode(struct inode *inode)
{
        struct block_device *bdev = &BDEV_I(inode)->bdev;
        truncate_inode_pages_final(&inode->i_data);
        invalidate_inode_buffers(inode); /* is it needed here? */
        clear_inode(inode);
        spin_lock(&bdev_lock);
        list_del_init(&bdev->bd_list);
        spin_unlock(&bdev_lock);
}

static const struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .destroy_inode = bdev_destroy_inode,
        .drop_inode = generic_delete_inode,
        .evict_inode = bdev_evict_inode,
};

static struct dentry *bd_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        struct dentry *dent;
        dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
        if (dent)
                dent->d_sb->s_iflags |= SB_I_CGROUPWB;
        return dent;
}

static struct file_system_type bd_type = {
        .name           = "bdev",
        .mount          = bd_mount,
        .kill_sb        = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
        int err;
        static struct vfsmount *bd_mnt;

        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
                        init_once);
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
        return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
        return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
        BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
        return 0;
}

static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = iget5_locked(blockdev_superblock, hash(dev),
                        bdev_test, bdev_set, &dev);

        if (!inode)
                return NULL;

        bdev = &BDEV_I(inode)->bdev;

        if (inode->i_state & I_NEW) {
                bdev->bd_contains = NULL;
                bdev->bd_super = NULL;
                bdev->bd_inode = inode;
                bdev->bd_block_size = (1 << inode->i_blkbits);
                bdev->bd_part_count = 0;
                bdev->bd_invalidated = 0;
                inode->i_mode = S_IFBLK;
                inode->i_rdev = dev;
                inode->i_bdev = bdev;
                inode->i_data.a_ops = &def_blk_aops;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                spin_lock(&bdev_lock);
                list_add(&bdev->bd_list, &all_bdevs);
                spin_unlock(&bdev_lock);
                unlock_new_inode(inode);
        }
        return bdev;
}

EXPORT_SYMBOL(bdget);

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:       Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
        ihold(bdev->bd_inode);
        return bdev;
}
EXPORT_SYMBOL(bdgrab);

long nr_blockdev_pages(void)
{
        struct block_device *bdev;
        long ret = 0;
        spin_lock(&bdev_lock);
        list_for_each_entry(bdev, &all_bdevs, bd_list) {
                ret += bdev->bd_inode->i_mapping->nrpages;
        }
        spin_unlock(&bdev_lock);
        return ret;
}

void bdput(struct block_device *bdev)
{
        iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

static struct block_device *bd_acquire(struct inode *inode)
{
        struct block_device *bdev;

        spin_lock(&bdev_lock);
        bdev = inode->i_bdev;
        if (bdev) {
                bdgrab(bdev);
                spin_unlock(&bdev_lock);
                return bdev;
        }
        spin_unlock(&bdev_lock);

        bdev = bdget(inode->i_rdev);
        if (bdev) {
                spin_lock(&bdev_lock);
                if (!inode->i_bdev) {
786 /*
Al Viro7de9c6ee2010-10-23 11:11:40 -0400787 * We take an additional reference to bd_inode,
OGAWA Hirofumi09d967c2006-06-22 14:47:21 -0700788 * and it's released in clear_inode() of inode.
789 * So, we can access it via ->i_mapping always
790 * without igrab().
791 */
                        bdgrab(bdev);
                        inode->i_bdev = bdev;
                        inode->i_mapping = bdev->bd_inode->i_mapping;
                }
                spin_unlock(&bdev_lock);
        }
        return bdev;
}

/* Call when you free inode */

void bd_forget(struct inode *inode)
{
        struct block_device *bdev = NULL;

        spin_lock(&bdev_lock);
        if (!sb_is_blkdev_sb(inode->i_sb))
                bdev = inode->i_bdev;
        inode->i_bdev = NULL;
        inode->i_mapping = &inode->i_data;
        spin_unlock(&bdev_lock);

        if (bdev)
                bdput(bdev);
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
                         void *holder)
{
        if (bdev->bd_holder == holder)
                return true;     /* already a holder */
        else if (bdev->bd_holder != NULL)
                return false;    /* held by someone else */
        else if (bdev->bd_contains == bdev)
                return true;     /* is a whole device which isn't held */

        else if (whole->bd_holder == bd_may_claim)
                return true;     /* is a partition of a device that is being partitioned */
        else if (whole->bd_holder != NULL)
                return false;    /* is a partition of a held device */
        else
                return true;     /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - prepare to claim a block device
 * @bdev: block device of interest
 * @whole: the whole device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Prepare to claim @bdev.  This function fails if @bdev is already
 * claimed by another holder and waits if another claiming is in
 * progress.  This function doesn't actually claim.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).  Might release bdev_lock, sleep and regrab
 * it multiple times.
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
static int bd_prepare_to_claim(struct block_device *bdev,
                               struct block_device *whole, void *holder)
{
retry:
        /* if someone else claimed, fail */
        if (!bd_may_claim(bdev, whole, holder))
                return -EBUSY;

        /* if claiming is already in progress, wait for it to finish */
        if (whole->bd_claiming) {
                wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
                DEFINE_WAIT(wait);

                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&bdev_lock);
                schedule();
                finish_wait(wq, &wait);
                spin_lock(&bdev_lock);
                goto retry;
        }

        /* yay, all mine */
        return 0;
}

/**
 * bd_start_claiming - start claiming a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * @bdev is about to be opened exclusively.  Check @bdev can be opened
 * exclusively and mark that an exclusive open is in progress.  Each
 * successful call to this function must be matched with a call to
 * either bd_finish_claiming() or bd_abort_claiming() (which do not
 * fail).
 *
 * This function is used to gain exclusive access to the block device
 * without actually causing other exclusive open attempts to fail. It
 * should be used when the open sequence itself requires exclusive
 * access but may subsequently fail.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to the block device containing @bdev on success, ERR_PTR()
 * value on failure.
 */
static struct block_device *bd_start_claiming(struct block_device *bdev,
                                              void *holder)
{
        struct gendisk *disk;
        struct block_device *whole;
        int partno, err;

        might_sleep();

        /*
         * @bdev might not have been initialized properly yet, look up
         * and grab the outer block device the hard way.
         */
        disk = get_gendisk(bdev->bd_dev, &partno);
        if (!disk)
                return ERR_PTR(-ENXIO);

        /*
         * Normally, @bdev should equal what's returned from bdget_disk()
         * if partno is 0; however, some drivers (floppy) use multiple
         * bdev's for the same physical device and @bdev may be one of the
         * aliases.  Keep @bdev if partno is 0.  This means claimer
         * tracking is broken for those devices but it has always been that
         * way.
         */
        if (partno)
                whole = bdget_disk(disk, 0);
        else
                whole = bdgrab(bdev);

        module_put(disk->fops->owner);
        put_disk(disk);
        if (!whole)
                return ERR_PTR(-ENOMEM);

        /* prepare to claim, if successful, mark claiming in progress */
        spin_lock(&bdev_lock);

        err = bd_prepare_to_claim(bdev, whole, holder);
        if (err == 0) {
                whole->bd_claiming = holder;
                spin_unlock(&bdev_lock);
                return whole;
        } else {
                spin_unlock(&bdev_lock);
                bdput(whole);
                return ERR_PTR(err);
        }
}

#ifdef CONFIG_SYSFS
struct bd_holder_disk {
        struct list_head        list;
        struct gendisk          *disk;
        int                     refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
                                                  struct gendisk *disk)
{
        struct bd_holder_disk *holder;

        list_for_each_entry(holder, &bdev->bd_holder_disks, list)
                if (holder->disk == disk)
                        return holder;
        return NULL;
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
        return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
        sysfs_remove_link(from, kobject_name(to));
}

/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
        struct bd_holder_disk *holder;
        int ret = 0;

        mutex_lock(&bdev->bd_mutex);

        WARN_ON_ONCE(!bdev->bd_holder);

        /* FIXME: remove the following once add_disk() handles errors */
        if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
                goto out_unlock;

        holder = bd_find_holder_disk(bdev, disk);
        if (holder) {
                holder->refcnt++;
                goto out_unlock;
        }

        holder = kzalloc(sizeof(*holder), GFP_KERNEL);
        if (!holder) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        INIT_LIST_HEAD(&holder->list);
        holder->disk = disk;
        holder->refcnt = 1;

        ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
        if (ret)
                goto out_free;

        ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
        if (ret)
                goto out_del;
        /*
         * bdev could be deleted beneath us which would implicitly destroy
         * the holder directory.  Hold on to it.
         */
        kobject_get(bdev->bd_part->holder_dir);

        list_add(&holder->list, &bdev->bd_holder_disks);
        goto out_unlock;

out_del:
        del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
out_free:
        kfree(holder);
out_unlock:
        mutex_unlock(&bdev->bd_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
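
/*
 * Illustrative usage (not part of the original file): a stacking driver
 * such as device-mapper pairs these calls around its use of a claimed
 * component device so that the "slaves"/"holders" symlinks track the
 * stacking.  The variable names are placeholders:
 *
 *      err = bd_link_disk_holder(component_bdev, stacked_disk);
 *      if (err)
 *              goto fail;
 *      ...
 *      bd_unlink_disk_holder(component_bdev, stacked_disk);
 *
 * See drivers/md/dm.c for a real caller.
 */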

/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
        struct bd_holder_disk *holder;

        mutex_lock(&bdev->bd_mutex);

        holder = bd_find_holder_disk(bdev, disk);

        if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
                del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
                del_symlink(bdev->bd_part->holder_dir,
                            &disk_to_dev(disk)->kobj);
                kobject_put(bdev->bd_part->holder_dir);
                list_del_init(&holder->list);
                kfree(holder);
        }

        mutex_unlock(&bdev->bd_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif

/**
 * flush_disk - invalidates all buffer-cache entries on a disk
 *
 * @bdev:       struct block device to be flushed
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Invalidates all buffer-cache entries on a disk. It should be called
 * when a disk has been changed -- either by a media change or online
 * resize.
 */
static void flush_disk(struct block_device *bdev, bool kill_dirty)
{
        if (__invalidate_device(bdev, kill_dirty)) {
                printk(KERN_WARNING "VFS: busy inodes on changed media or "
                       "resized disk %s\n",
                       bdev->bd_disk ? bdev->bd_disk->disk_name : "");
        }

        if (!bdev->bd_disk)
                return;
        if (disk_part_scan_enabled(bdev->bd_disk))
                bdev->bd_invalidated = 1;
}

/**
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @bdev: struct bdev to adjust.
 *
 * This routine checks to see if the bdev size does not match the disk size
 * and adjusts it if it differs.
 */
void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
{
        loff_t disk_size, bdev_size;

        disk_size = (loff_t)get_capacity(disk) << 9;
        bdev_size = i_size_read(bdev->bd_inode);
        if (disk_size != bdev_size) {
                printk(KERN_INFO
                       "%s: detected capacity change from %lld to %lld\n",
                       disk->disk_name, bdev_size, disk_size);
                i_size_write(bdev->bd_inode, disk_size);
                flush_disk(bdev, false);
        }
}
EXPORT_SYMBOL(check_disk_size_change);

/**
 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
 * @disk: struct gendisk to be revalidated
 *
 * This routine is a wrapper for lower-level driver's revalidate_disk
 * call-backs.  It is used to do common pre and post operations needed
 * for all revalidate_disk operations.
 */
int revalidate_disk(struct gendisk *disk)
{
        struct block_device *bdev;
        int ret = 0;

        if (disk->fops->revalidate_disk)
                ret = disk->fops->revalidate_disk(disk);
        blk_integrity_revalidate(disk);
        bdev = bdget_disk(disk, 0);
        if (!bdev)
                return ret;

        mutex_lock(&bdev->bd_mutex);
        check_disk_size_change(disk, bdev);
        bdev->bd_invalidated = 0;
        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
        return ret;
}
EXPORT_SYMBOL(revalidate_disk);
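
/*
 * Illustrative usage (not part of the original file): a driver that
 * learns of a capacity change (for example from a virtio-blk config
 * interrupt) re-synchronizes the bdev size with:
 *
 *      set_capacity(disk, new_sectors);
 *      revalidate_disk(disk);
 *
 * which funnels into check_disk_size_change() above.  "new_sectors" is
 * a placeholder for the freshly queried device size.
 */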

/*
 * This routine checks whether a removable medium has been changed,
 * and invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
        struct gendisk *disk = bdev->bd_disk;
        const struct block_device_operations *bdops = disk->fops;
        unsigned int events;

        events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
                                   DISK_EVENT_EJECT_REQUEST);
        if (!(events & DISK_EVENT_MEDIA_CHANGE))
                return 0;

        flush_disk(bdev, true);
        if (bdops->revalidate_disk)
                bdops->revalidate_disk(bdev->bd_disk);
        return 1;
}

EXPORT_SYMBOL(check_disk_change);
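
/*
 * Illustrative usage (not part of the original file): removable-media
 * drivers call this from their ->open() method so stale caches are
 * dropped before a new medium is used.  "my_open" is a placeholder:
 *
 *      static int my_open(struct block_device *bdev, fmode_t mode)
 *      {
 *              check_disk_change(bdev);
 *              return 0;
 *      }
 *
 * The cdrom and floppy drivers follow this pattern.
 */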

void bd_set_size(struct block_device *bdev, loff_t size)
{
        unsigned bsize = bdev_logical_block_size(bdev);

        inode_lock(bdev->bd_inode);
        i_size_write(bdev->bd_inode, size);
        inode_unlock(bdev->bd_inode);
        while (bsize < PAGE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_block_size = bsize;
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);

/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */

static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
{
        struct gendisk *disk;
        struct module *owner;
        int ret;
        int partno;
        int perm = 0;

        if (mode & FMODE_READ)
                perm |= MAY_READ;
        if (mode & FMODE_WRITE)
                perm |= MAY_WRITE;
        /*
         * hooks: /n/, see "layering violations".
         */
        if (!for_part) {
                ret = devcgroup_inode_permission(bdev->bd_inode, perm);
                if (ret != 0) {
                        bdput(bdev);
                        return ret;
                }
        }

 restart:

        ret = -ENXIO;
        disk = get_gendisk(bdev->bd_dev, &partno);
        if (!disk)
                goto out;
        owner = disk->fops->owner;

        disk_block_events(disk);
        mutex_lock_nested(&bdev->bd_mutex, for_part);
        if (!bdev->bd_openers) {
                bdev->bd_disk = disk;
                bdev->bd_queue = disk->queue;
                bdev->bd_contains = bdev;
                bdev->bd_inode->i_flags = 0;

                if (!partno) {
                        ret = -ENXIO;
                        bdev->bd_part = disk_get_part(disk, partno);
                        if (!bdev->bd_part)
                                goto out_clear;

                        ret = 0;
                        if (disk->fops->open) {
                                ret = disk->fops->open(bdev, mode);
                                if (ret == -ERESTARTSYS) {
                                        /* Lost a race with 'disk' being
                                         * deleted, try again.
                                         * See md.c
                                         */
                                        disk_put_part(bdev->bd_part);
                                        bdev->bd_part = NULL;
                                        bdev->bd_disk = NULL;
                                        bdev->bd_queue = NULL;
                                        mutex_unlock(&bdev->bd_mutex);
                                        disk_unblock_events(disk);
                                        put_disk(disk);
                                        module_put(owner);
                                        goto restart;
                                }
                        }

                        if (!ret) {
                                bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
                                if (!bdev_dax_capable(bdev))
                                        bdev->bd_inode->i_flags &= ~S_DAX;
                        }

                        /*
                         * If the device is invalidated, rescan partition
                         * if open succeeded or failed with -ENOMEDIUM.
                         * The latter is necessary to prevent ghost
                         * partitions on a removed medium.
                         */
                        if (bdev->bd_invalidated) {
                                if (!ret)
                                        rescan_partitions(disk, bdev);
                                else if (ret == -ENOMEDIUM)
                                        invalidate_partitions(disk, bdev);
                        }

                        if (ret)
                                goto out_clear;
                } else {
                        struct block_device *whole;
                        whole = bdget_disk(disk, 0);
                        ret = -ENOMEM;
                        if (!whole)
                                goto out_clear;
                        BUG_ON(for_part);
                        ret = __blkdev_get(whole, mode, 1);
                        if (ret)
                                goto out_clear;
                        bdev->bd_contains = whole;
                        bdev->bd_part = disk_get_part(disk, partno);
                        if (!(disk->flags & GENHD_FL_UP) ||
                            !bdev->bd_part || !bdev->bd_part->nr_sects) {
                                ret = -ENXIO;
                                goto out_clear;
                        }
                        bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
                        if (!bdev_dax_capable(bdev))
                                bdev->bd_inode->i_flags &= ~S_DAX;
                }
        } else {
                if (bdev->bd_contains == bdev) {
                        ret = 0;
                        if (bdev->bd_disk->fops->open)
                                ret = bdev->bd_disk->fops->open(bdev, mode);
                        /* the same as first opener case, read comment there */
                        if (bdev->bd_invalidated) {
                                if (!ret)
                                        rescan_partitions(bdev->bd_disk, bdev);
                                else if (ret == -ENOMEDIUM)
                                        invalidate_partitions(bdev->bd_disk, bdev);
                        }
                        if (ret)
                                goto out_unlock_bdev;
                }
                /* only one opener holds refs to the module and disk */
                put_disk(disk);
                module_put(owner);
        }
        bdev->bd_openers++;
        if (for_part)
                bdev->bd_part_count++;
        mutex_unlock(&bdev->bd_mutex);
        disk_unblock_events(disk);
        return 0;

 out_clear:
        disk_put_part(bdev->bd_part);
        bdev->bd_disk = NULL;
        bdev->bd_part = NULL;
        bdev->bd_queue = NULL;
        if (bdev != bdev->bd_contains)
                __blkdev_put(bdev->bd_contains, mode, 1);
        bdev->bd_contains = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 bdev->bd_contains = NULL;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001381 out_unlock_bdev:
Arjan van de Venc039e312006-03-23 03:00:28 -08001382 mutex_unlock(&bdev->bd_mutex);
Tejun Heo69e02c52011-03-09 19:54:27 +01001383 disk_unblock_events(disk);
Tejun Heo0762b8b2008-08-25 19:56:12 +09001384 put_disk(disk);
Tejun Heo523e1d32011-10-19 14:31:07 +02001385 module_put(owner);
Dan Carpenter4345cab2011-03-19 13:53:31 +01001386 out:
Tejun Heo0762b8b2008-08-25 19:56:12 +09001387 bdput(bdev);
1388
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 return ret;
1390}
1391
Tejun Heod4d77622010-11-13 11:55:18 +01001392/**
1393 * blkdev_get - open a block device
1394 * @bdev: block_device to open
1395 * @mode: FMODE_* mask
1396 * @holder: exclusive holder identifier
1397 *
1398 * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
1399 * open with exclusive access. Specifying %FMODE_EXCL with %NULL
1400 * @holder is invalid. Exclusive opens may nest for the same @holder.
1401 *
1402 * On success, the reference count of @bdev is unchanged. On failure,
1403 * @bdev is put.
1404 *
1405 * CONTEXT:
1406 * Might sleep.
1407 *
1408 * RETURNS:
1409 * 0 on success, -errno on failure.
1410 */
Tejun Heoe525fd82010-11-13 11:55:17 +01001411int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412{
Tejun Heoe525fd82010-11-13 11:55:17 +01001413 struct block_device *whole = NULL;
1414 int res;
1415
1416 WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
1417
1418 if ((mode & FMODE_EXCL) && holder) {
1419 whole = bd_start_claiming(bdev, holder);
1420 if (IS_ERR(whole)) {
1421 bdput(bdev);
1422 return PTR_ERR(whole);
1423 }
1424 }
1425
1426 res = __blkdev_get(bdev, mode, 0);
1427
1428 if (whole) {
Tejun Heod4dc2102011-04-21 20:54:46 +02001429 struct gendisk *disk = whole->bd_disk;
1430
Tejun Heo6a027ef2010-11-13 11:55:17 +01001431 /* finish claiming */
Tejun Heo77ea8872010-12-08 20:57:37 +01001432 mutex_lock(&bdev->bd_mutex);
Tejun Heo6a027ef2010-11-13 11:55:17 +01001433 spin_lock(&bdev_lock);
1434
Tejun Heo77ea8872010-12-08 20:57:37 +01001435 if (!res) {
Tejun Heo6a027ef2010-11-13 11:55:17 +01001436 BUG_ON(!bd_may_claim(bdev, whole, holder));
1437 /*
1438 * Note that for a whole device bd_holders
1439 * will be incremented twice, and bd_holder
1440 * will be set to bd_may_claim before being
1441 * set to holder
1442 */
1443 whole->bd_holders++;
1444 whole->bd_holder = bd_may_claim;
1445 bdev->bd_holders++;
1446 bdev->bd_holder = holder;
1447 }
1448
1449 /* tell others that we're done */
1450 BUG_ON(whole->bd_claiming != holder);
1451 whole->bd_claiming = NULL;
1452 wake_up_bit(&whole->bd_claiming, 0);
1453
1454 spin_unlock(&bdev_lock);
Tejun Heo77ea8872010-12-08 20:57:37 +01001455
		/*
		 * Block event polling for write claims if requested.  Any
		 * write holder makes the write_holder state stick until
		 * all are released.  This is good enough because tracking
		 * individual writeable references is too fragile given the
		 * way @mode is used in blkdev_get/put().
		 */
		if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			disk_block_events(disk);
		}

		mutex_unlock(&bdev->bd_mutex);
		bdput(whole);
	}

	return res;
}
EXPORT_SYMBOL(blkdev_get);
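
/*
 * Illustrative sketch, not part of this file: how a kernel caller might
 * take and drop an exclusive claim with blkdev_get()/blkdev_put().  The
 * function name and the holder cookie are hypothetical; any stable
 * pointer that outlives the claim works as @holder.
 */
#if 0
static int example_claim_bdev(struct block_device *bdev, void *holder)
{
	fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	int err;

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return err;	/* on failure blkdev_get() already put bdev */

	/* ... exclusive access to the device here ... */

	blkdev_put(bdev, mode);	/* must pass the same FMODE_EXCL bit */
	return 0;
}
#endif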

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path.  @mode
 * and @holder are identical to blkdev_get().
 *
 * On success, the returned block_device has a reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return bdev;

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
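
/*
 * Usage sketch (illustrative, not part of this file): roughly how a
 * filesystem might open its backing device by name at mount time.  The
 * function name is hypothetical; the fs_type pointer is just one example
 * of a stable holder cookie.
 */
#if 0
static struct block_device *example_open_backing_dev(const char *dev_name,
					struct file_system_type *fs_type)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(dev_name,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  fs_type);
	if (IS_ERR(bdev))
		return bdev;	/* e.g. ERR_PTR(-ENOENT) or ERR_PTR(-EBUSY) */

	/* success: one reference plus an exclusive claim are held */
	return bdev;
}
#endif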

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev.  @mode and
 * @holder are identical to blkdev_get().
 *
 * Use it ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a
 * device number.  _Never_ to be used for internal purposes.  If you
 * ever need it - reconsider your API.
 *
 * On success, the returned block_device has a reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = bdget(dev);
	if (!bdev)
		return ERR_PTR(-ENOMEM);

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_dev);
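
/*
 * Sketch only: opening by device number when nothing better is available,
 * e.g. a dev_t assembled from a "major:minor" module parameter.  The
 * helper name is hypothetical.
 */
#if 0
static struct block_device *example_open_by_devt(unsigned int major,
						 unsigned int minor,
						 void *holder)
{
	return blkdev_get_by_dev(MKDEV(major, minor),
				 FMODE_READ | FMODE_EXCL, holder);
}
#endif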

static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly.  Some mkfs
	 * binaries need it.  We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = bd_acquire(inode);
	if (bdev == NULL)
		return -ENOMEM;

	filp->f_mapping = bdev->bd_inode->i_mapping;

	return blkdev_get(bdev, filp->f_mode, filp);
}

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		WARN_ON_ONCE(bdev->bd_holders);
		sync_blockdev(bdev);
		kill_bdev(bdev);

		bdev_write_inode(bdev);
		/*
		 * Detaching bdev inode from its wb in __destroy_inode()
		 * is too late: the queue which embeds its bdi (along with
		 * root wb) can be gone as soon as we put_disk() below.
		 */
		inode_detach_wb(bdev->bd_inode);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			disk->fops->release(disk, mode);
	}
	if (!bdev->bd_openers) {
		struct module *owner = disk->fops->owner;

		disk_put_part(bdev->bd_part);
		bdev->bd_part = NULL;
		bdev->bd_disk = NULL;
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;

		put_disk(disk);
		module_put(owner);
	}
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	if (victim)
		__blkdev_put(victim, mode, 1);
}

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	mutex_lock(&bdev->bd_mutex);

	if (mode & FMODE_EXCL) {
		bool bdev_free;

		/*
		 * Release a claim on the device.  The holder fields
		 * are protected with bdev_lock.  bd_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);

		/* bd_contains might point to self, check in a separate step */
		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!bdev->bd_contains->bd_holders)
			bdev->bd_contains->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove the holder link and
		 * unblock event polling if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(bdev->bd_disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);

	mutex_unlock(&bdev->bd_mutex);

	__blkdev_put(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_put);
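
/*
 * Sketch, not part of this file: releasing a device opened with
 * blkdev_get_by_path().  Note that @mode must carry the same FMODE_EXCL
 * bit that was used at open time, or the claim is never dropped.
 */
#if 0
static void example_release_bdev(struct block_device *bdev)
{
	/* assumes the device was opened with exactly these mode bits */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
#endif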

static int blkdev_close(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(filp));

	blkdev_put(bdev, filp->f_mode);
	return 0;
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	fmode_t mode = file->f_mode;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	return blkdev_ioctl(bdev, mode, cmd, arg);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	struct blk_plug plug;
	ssize_t ret;

	if (bdev_read_only(I_BDEV(bd_inode)))
		return -EPERM;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	iov_iter_truncate(from, size - iocb->ki_pos);

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_write_iter);

ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	loff_t pos = iocb->ki_pos;

	if (pos >= size)
		return 0;

	size -= pos;
	iov_iter_truncate(to, size);
	return generic_file_read_iter(iocb, to);
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);

/*
 * Try to release a page associated with a block device when the system
 * is under memory pressure.
 */
static int blkdev_releasepage(struct page *page, gfp_t wait)
{
	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;

	if (super && super->s_op->bdev_try_to_free_page)
		return super->s_op->bdev_try_to_free_page(super, page, wait);

	return try_to_free_buffers(page);
}

static int blkdev_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	if (dax_mapping(mapping)) {
		struct block_device *bdev = I_BDEV(mapping->host);

		return dax_writeback_mapping_range(mapping, bdev, wbc);
	}
	return generic_writepages(mapping, wbc);
}

static const struct address_space_operations def_blk_aops = {
	.readpage	= blkdev_readpage,
	.readpages	= blkdev_readpages,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= blkdev_writepages,
	.releasepage	= blkdev_releasepage,
	.direct_IO	= blkdev_direct_IO,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};

int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
	int res;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	res = blkdev_ioctl(bdev, 0, cmd, arg);
	set_fs(old_fs);
	return res;
}
EXPORT_SYMBOL(ioctl_by_bdev);
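
/*
 * Illustrative use only: issuing a block ioctl from kernel context.
 * BLKFLSBUF takes no argument, so 0 is passed; the helper name is
 * hypothetical.
 */
#if 0
static int example_flush_bdev_bufs(struct block_device *bdev)
{
	return ioctl_by_bdev(bdev, BLKFLSBUF, 0);
}
#endif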

/**
 * lookup_bdev - lookup a struct block_device by name
 * @pathname: special file representing the block device
 *
 * Get a reference to the block device at @pathname in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *pathname)
{
	struct block_device *bdev;
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return ERR_PTR(-EINVAL);

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return ERR_PTR(error);

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode);
	if (!bdev)
		goto fail;
out:
	path_put(&path);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}
EXPORT_SYMBOL(lookup_bdev);
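
/*
 * Sketch (not part of this file): resolving a path such as "/dev/sda1"
 * to a struct block_device.  lookup_bdev() does not open the device;
 * the reference it returns is dropped with bdput().
 */
#if 0
static int example_lookup_only(const char *path)
{
	struct block_device *bdev;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	pr_info("resolved %s to %u:%u\n", path,
		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
	bdput(bdev);
	return 0;
}
#endif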

int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * No need to lock the super; get_super() holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from the s_inodes list while we dropped the
		 * s_inode_list_lock.  We cannot iput the inode now as we
		 * may be holding the last reference and we cannot iput it
		 * under s_inode_list_lock.  So we keep the reference and
		 * iput it later.
		 */
		iput(old_inode);
		old_inode = inode;

		func(I_BDEV(inode), arg);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}
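
/*
 * Sketch of a callback suitable for iterate_bdevs(); illustrative only,
 * though fs/sync.c drives writeback over all block devices in much the
 * same way.  The callback runs with the inode pinned, so touching the
 * mapping is safe.
 */
#if 0
static void example_writeback_one(struct block_device *bdev, void *arg)
{
	filemap_fdatawrite(bdev->bd_inode->i_mapping);
}

/* e.g.: iterate_bdevs(example_writeback_one, NULL); */
#endif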