/*
 *  linux/fs/block_dev.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/dax.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

void __vfs_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sVFS (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					    "for block device %s (err=%d).\n",
					    bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(kill_bdev);

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	lru_add_drain_all();	/* make sure all lru add caches are flushed */
	invalidate_mapping_pages(mapping, 0, -1);
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current one */
	if (bdev->bd_block_size != size) {
		sync_blockdev(bdev);
		bdev->bd_block_size = size;
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

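/*
 * Usage sketch (illustrative; not part of the original file): a
 * filesystem's fill_super callback typically picks its block size with
 * sb_min_blocksize() and fails the mount when the device cannot support
 * it.  "example_fill_super" is a hypothetical name.
 *
 *	static int example_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		if (!sb_min_blocksize(sb, 1024))
 *			return -EINVAL;
 *		...
 *	}
 */
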
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = bdev_file_inode(file);

	if (IS_DAX(inode))
		return dax_do_io(iocb, inode, iter, offset, blkdev_get_block,
				NULL, DIO_SKIP_DIO_COUNT);
	return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
				    blkdev_get_block, NULL, NULL,
				    DIO_SKIP_DIO_COUNT);
}

int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

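/*
 * Usage sketch (illustrative; not part of the original file): before
 * bypassing the page cache for a device-level operation, a caller can
 * flush dirty data and then drop the cached copies:
 *
 *	sync_blockdev(bdev);	write back dirty pages and wait
 *	invalidate_bdev(bdev);	drop the now-clean cached pages
 */
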
/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously.  It counts up in freeze_bdev() and
 * counts down in thaw_bdev().  When it reaches 0, thaw_bdev() actually
 * unfreezes the filesystem.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1) {
		/*
		 * We don't even need to grab a reference - the first call
		 * to freeze_bdev grabs an active reference and only the last
		 * thaw_bdev drops it.
		 */
		sb = get_super(bdev);
		drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}

	sb = get_active_super(bdev);
	if (!sb)
		goto out;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	if (error) {
		deactivate_super(sb);
		bdev->bd_fsfreeze_count--;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return ERR_PTR(error);
	}
	deactivate_super(sb);
 out:
	sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return sb;	/* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error) {
		bdev->bd_fsfreeze_count++;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return error;
	}
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return 0;
}
EXPORT_SYMBOL(thaw_bdev);

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_readpages(struct file *file, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = bdev_file_inode(filp);
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = filemap_write_and_wait_range(filp->f_mapping, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);

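/*
 * Usage sketch (illustrative; not part of the original file):
 * blkdev_fsync() is exported so it can be wired into a file_operations
 * table for a block-special file, e.g.:
 *
 *	const struct file_operations example_fops = {
 *		.fsync	= blkdev_fsync,
 *		...
 *	};
 *
 * "example_fops" is a hypothetical name.
 */
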
/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page to (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_queue, false);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
	blk_queue_exit(bdev->bd_queue);
	return result;
}
EXPORT_SYMBOL_GPL(bdev_read_page);

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_queue, false);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
	if (result)
		end_page_writeback(page);
	else
		unlock_page(page);
	blk_queue_exit(bdev->bd_queue);
	return result;
}
EXPORT_SYMBOL_GPL(bdev_write_page);

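/*
 * Usage sketch (illustrative; not part of the original file): callers
 * treat rw_page failures as "soft" and fall back to a normal bio, e.g.
 * inside a ->writepage implementation:
 *
 *	if (bdev_write_page(bdev, sector, page, wbc) == 0)
 *		return 0;	submitted without allocating a bio
 *	... otherwise build and submit a regular bio ...
 */
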
/**
 * bdev_direct_access() - Get the address for directly-accessible memory
 * @bdev: The device containing the memory
 * @dax: control and output parameters for ->direct_access
 *
 * If a block device is made up of directly addressable memory, this function
 * will tell the caller the PFN and the address of the memory.  The address
 * may be directly dereferenced within the kernel without the need to call
 * ioremap(), kmap() or similar.  The PFN is suitable for inserting into
 * page tables.
 *
 * Return: negative errno if an error occurs, otherwise the number of bytes
 * accessible at this address.
 */
long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	sector_t sector = dax->sector;
	long avail, size = dax->size;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	/*
	 * The device driver is allowed to sleep, in order to make the
	 * memory directly accessible.
	 */
	might_sleep();

	if (size < 0)
		return size;
	if (!ops->direct_access)
		return -EOPNOTSUPP;
	if ((sector + DIV_ROUND_UP(size, 512)) >
					part_nr_sects_read(bdev->bd_part))
		return -ERANGE;
	sector += get_start_sect(bdev);
	if (sector % (PAGE_SIZE / 512))
		return -EINVAL;
	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn);
	if (!avail)
		return -ERANGE;
	if (avail > 0 && avail & ~PAGE_MASK)
		return -ENXIO;
	return min(avail, size);
}
EXPORT_SYMBOL_GPL(bdev_direct_access);

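/*
 * Usage sketch (illustrative; not part of the original file): a DAX
 * caller fills in the sector and size it wants and, on success, gets
 * back a directly dereferenceable address and a pfn:
 *
 *	struct blk_dax_ctl dax = {
 *		.sector = sector,
 *		.size	= PAGE_SIZE,
 *	};
 *	long avail = bdev_direct_access(bdev, &dax);
 *	if (avail < 0)
 *		return avail;
 *	... memory is now addressable at dax.addr for "avail" bytes ...
 */
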
/*
 * pseudo-fs
 */

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct bdev_inode *bdi = BDEV_I(inode);

	kmem_cache_free(bdev_cachep, bdi);
}

static void bdev_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, bdev_i_callback);
}

static void init_once(void *foo)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	memset(bdev, 0, sizeof(*bdev));
	mutex_init(&bdev->bd_mutex);
	INIT_LIST_HEAD(&bdev->bd_inodes);
	INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	inode_init_once(&ei->vfs_inode);
	/* Initialize mutex for freeze. */
	mutex_init(&bdev->bd_fsfreeze_mutex);
}

static inline void __bd_forget(struct inode *inode)
{
	list_del_init(&inode->i_devices);
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
}

static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	struct list_head *p;
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	spin_lock(&bdev_lock);
	while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
		__bd_forget(list_entry(p, struct inode, i_devices));
	}
	list_del_init(&bdev->bd_list);
	spin_unlock(&bdev_lock);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static struct dentry *bd_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	struct dentry *dent;
	dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
	if (dent)
		dent->d_sb->s_iflags |= SB_I_CGROUPWB;
	return dent;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.mount		= bd_mount,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody will need really large one.
 * Keep in mind that it will be fed through icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(blockdev_superblock, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_super = NULL;
		bdev->bd_inode = inode;
		bdev->bd_block_size = (1 << inode->i_blkbits);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

EXPORT_SYMBOL(bdget);

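/*
 * Usage sketch (illustrative; not part of the original file): code that
 * only has a dev_t can materialize the matching block device and later
 * drop its reference:
 *
 *	struct block_device *bdev = bdget(dev);
 *	if (!bdev)
 *		return -ENOMEM;
 *	... use bdev->bd_inode, bdev->bd_disk, etc. ...
 *	bdput(bdev);
 */
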
/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	ihold(bdev->bd_inode);
	return bdev;
}
EXPORT_SYMBOL(bdgrab);

long nr_blockdev_pages(void)
{
	struct block_device *bdev;
	long ret = 0;
	spin_lock(&bdev_lock);
	list_for_each_entry(bdev, &all_bdevs, bd_list) {
		ret += bdev->bd_inode->i_mapping->nrpages;
	}
	spin_unlock(&bdev_lock);
	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev) {
		bdgrab(bdev);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional reference to bd_inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			bdgrab(bdev);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
			list_add(&inode->i_devices, &bdev->bd_inodes);
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

/* Call when you free inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (!sb_is_blkdev_sb(inode->i_sb))
		bdev = inode->i_bdev;
	__bd_forget(inode);
	spin_unlock(&bdev_lock);

	if (bdev)
		bdput(bdev);
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (bdev->bd_contains == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - prepare to claim a block device
 * @bdev: block device of interest
 * @whole: the whole device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Prepare to claim @bdev.  This function fails if @bdev is already
 * claimed by another holder and waits if another claiming is in
 * progress.  This function doesn't actually claim.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).  Might release bdev_lock, sleep and regrab
 * it multiple times.
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
static int bd_prepare_to_claim(struct block_device *bdev,
			       struct block_device *whole, void *holder)
{
retry:
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder))
		return -EBUSY;

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		spin_lock(&bdev_lock);
		goto retry;
	}

	/* yay, all mine */
	return 0;
}

/**
 * bd_start_claiming - start claiming a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * @bdev is about to be opened exclusively.  Check @bdev can be opened
 * exclusively and mark that an exclusive open is in progress.  Each
 * successful call to this function must be matched with a call to
 * either bd_finish_claiming() or bd_abort_claiming() (which do not
 * fail).
 *
 * This function is used to gain exclusive access to the block device
 * without actually causing other exclusive open attempts to fail.  It
 * should be used when the open sequence itself requires exclusive
 * access but may subsequently fail.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to the block device containing @bdev on success, ERR_PTR()
 * value on failure.
 */
static struct block_device *bd_start_claiming(struct block_device *bdev,
					      void *holder)
{
	struct gendisk *disk;
	struct block_device *whole;
	int partno, err;

	might_sleep();

	/*
	 * @bdev might not have been initialized properly yet, look up
	 * and grab the outer block device the hard way.
	 */
	disk = get_gendisk(bdev->bd_dev, &partno);
	if (!disk)
		return ERR_PTR(-ENXIO);

	/*
	 * Normally, @bdev should equal what's returned from bdget_disk()
	 * if partno is 0; however, some drivers (floppy) use multiple
	 * bdev's for the same physical device and @bdev may be one of the
	 * aliases.  Keep @bdev if partno is 0.  This means claimer
	 * tracking is broken for those devices but it has always been that
	 * way.
	 */
	if (partno)
		whole = bdget_disk(disk, 0);
	else
		whole = bdgrab(bdev);

	module_put(disk->fops->owner);
	put_disk(disk);
	if (!whole)
		return ERR_PTR(-ENOMEM);

	/* prepare to claim, if successful, mark claiming in progress */
	spin_lock(&bdev_lock);

	err = bd_prepare_to_claim(bdev, whole, holder);
	if (err == 0) {
		whole->bd_claiming = holder;
		spin_unlock(&bdev_lock);
		return whole;
	} else {
		spin_unlock(&bdev_lock);
		bdput(whole);
		return ERR_PTR(err);
	}
}

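/*
 * Usage sketch (illustrative; not part of the original file): the
 * claiming helpers above are internal.  Drivers request an exclusive
 * claim by passing FMODE_EXCL plus a holder cookie to the blkdev_get*()
 * family (see blkdev_get() below), e.g.:
 *
 *	bdev = blkdev_get_by_path("/dev/sda1", mode | FMODE_EXCL, holder);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, mode | FMODE_EXCL);
 */
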
#ifdef CONFIG_SYSFS
struct bd_holder_disk {
	struct list_head	list;
	struct gendisk		*disk;
	int			refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
						  struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
		if (holder->disk == disk)
			return holder;
	return NULL;
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	sysfs_remove_link(from, kobject_name(to));
}

/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;
	int ret = 0;

	mutex_lock(&bdev->bd_mutex);

	WARN_ON_ONCE(!bdev->bd_holder);

	/* FIXME: remove the following once add_disk() handles errors */
	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
		goto out_unlock;

	holder = bd_find_holder_disk(bdev, disk);
	if (holder) {
		holder->refcnt++;
		goto out_unlock;
	}

	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;

	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
	if (ret)
		goto out_free;

	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
	if (ret)
		goto out_del;
	/*
	 * bdev could be deleted beneath us which would implicitly destroy
	 * the holder directory.  Hold on to it.
	 */
	kobject_get(bdev->bd_part->holder_dir);

	list_add(&holder->list, &bdev->bd_holder_disks);
	goto out_unlock;

out_del:
	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
out_free:
	kfree(holder);
out_unlock:
	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);

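/*
 * Usage sketch (illustrative; not part of the original file): a stacking
 * driver such as dm links each claimed member device to its own gendisk
 * and removes the link on teardown:
 *
 *	ret = bd_link_disk_holder(member_bdev, stacked_disk);
 *	...
 *	bd_unlink_disk_holder(member_bdev, stacked_disk);
 *
 * "member_bdev" and "stacked_disk" are hypothetical names.
 */
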
/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	mutex_lock(&bdev->bd_mutex);

	holder = bd_find_holder_disk(bdev, disk);

	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
		del_symlink(bdev->bd_part->holder_dir,
			    &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_part->holder_dir);
		list_del_init(&holder->list);
		kfree(holder);
	}

	mutex_unlock(&bdev->bd_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif

/**
 * flush_disk - invalidates all buffer-cache entries on a disk
 *
 * @bdev:	struct block device to be flushed
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Invalidates all buffer-cache entries on a disk. It should be called
 * when a disk has been changed -- either by a media change or online
 * resize.
 */
static void flush_disk(struct block_device *bdev, bool kill_dirty)
{
	if (__invalidate_device(bdev, kill_dirty)) {
		printk(KERN_WARNING "VFS: busy inodes on changed media or "
		       "resized disk %s\n",
		       bdev->bd_disk ? bdev->bd_disk->disk_name : "");
	}

	if (!bdev->bd_disk)
		return;
	if (disk_part_scan_enabled(bdev->bd_disk))
		bdev->bd_invalidated = 1;
}

/**
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @bdev: struct bdev to adjust.
 *
 * This routine checks to see if the bdev size does not match the disk size
 * and adjusts it if it differs.
 */
void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
{
	loff_t disk_size, bdev_size;

	disk_size = (loff_t)get_capacity(disk) << 9;
	bdev_size = i_size_read(bdev->bd_inode);
	if (disk_size != bdev_size) {
		printk(KERN_INFO
		       "%s: detected capacity change from %lld to %lld\n",
		       disk->disk_name, bdev_size, disk_size);
		i_size_write(bdev->bd_inode, disk_size);
		flush_disk(bdev, false);
	}
}
EXPORT_SYMBOL(check_disk_size_change);

/**
 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
 * @disk: struct gendisk to be revalidated
 *
 * This routine is a wrapper for lower-level driver's revalidate_disk
 * call-backs.  It is used to do common pre and post operations needed
 * for all revalidate_disk operations.
 */
int revalidate_disk(struct gendisk *disk)
{
	struct block_device *bdev;
	int ret = 0;

	if (disk->fops->revalidate_disk)
		ret = disk->fops->revalidate_disk(disk);
	blk_integrity_revalidate(disk);
	bdev = bdget_disk(disk, 0);
	if (!bdev)
		return ret;

	mutex_lock(&bdev->bd_mutex);
	check_disk_size_change(disk, bdev);
	bdev->bd_invalidated = 0;
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
EXPORT_SYMBOL(revalidate_disk);

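/*
 * Usage sketch (illustrative; not part of the original file): a driver
 * whose backing store changed size updates the capacity and then lets
 * revalidate_disk() propagate it to the bdev inode:
 *
 *	set_capacity(disk, new_nr_sectors);
 *	revalidate_disk(disk);
 *
 * "new_nr_sectors" is a hypothetical variable.
 */
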
/*
 * This routine checks whether a removable media has been changed,
 * and invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	const struct block_device_operations *bdops = disk->fops;
	unsigned int events;

	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
				   DISK_EVENT_EJECT_REQUEST);
	if (!(events & DISK_EVENT_MEDIA_CHANGE))
		return 0;

	flush_disk(bdev, true);
	if (bdops->revalidate_disk)
		bdops->revalidate_disk(bdev->bd_disk);
	return 1;
}

EXPORT_SYMBOL(check_disk_change);

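/*
 * Usage sketch (illustrative; not part of the original file): removable
 * media drivers call check_disk_change() from their ->open() method so a
 * swapped medium is detected before any cached data is reused:
 *
 *	static int example_open(struct block_device *bdev, fmode_t mode)
 *	{
 *		check_disk_change(bdev);
 *		...
 *	}
 *
 * "example_open" is a hypothetical name.
 */
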
1157void bd_set_size(struct block_device *bdev, loff_t size)
1158{
Martin K. Petersene1defc42009-05-22 17:17:49 -04001159 unsigned bsize = bdev_logical_block_size(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160
Al Viro59551022016-01-22 15:40:57 -05001161 inode_lock(bdev->bd_inode);
Guo Chaod646a022013-02-21 15:16:42 -08001162 i_size_write(bdev->bd_inode, size);
Al Viro59551022016-01-22 15:40:57 -05001163 inode_unlock(bdev->bd_inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001164 while (bsize < PAGE_SIZE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165 if (size & bsize)
1166 break;
1167 bsize <<= 1;
1168 }
1169 bdev->bd_block_size = bsize;
1170 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
1171}
1172EXPORT_SYMBOL(bd_set_size);
1173
Al Viro4385bab2013-05-05 22:11:03 -04001174static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
NeilBrown37be4122006-12-08 02:36:16 -08001175
Peter Zijlstra6d740cd2007-02-20 13:58:18 -08001176/*
1177 * bd_mutex locking:
1178 *
1179 * mutex_lock(part->bd_mutex)
1180 * mutex_lock_nested(whole->bd_mutex, 1)
1181 */
1182
Al Viro572c4892007-10-08 13:24:05 -04001183static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 struct gendisk *disk;
Tejun Heo523e1d32011-10-19 14:31:07 +02001186 struct module *owner;
Pavel Emelyanov7db9cfd2008-06-05 22:46:27 -07001187 int ret;
Tejun Heocf771cb2008-09-03 09:01:09 +02001188 int partno;
Al Virofe6e9c12008-06-23 08:30:55 -04001189 int perm = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190
Al Viro572c4892007-10-08 13:24:05 -04001191 if (mode & FMODE_READ)
Al Virofe6e9c12008-06-23 08:30:55 -04001192 perm |= MAY_READ;
Al Viro572c4892007-10-08 13:24:05 -04001193 if (mode & FMODE_WRITE)
Al Virofe6e9c12008-06-23 08:30:55 -04001194 perm |= MAY_WRITE;
1195 /*
1196 * hooks: /n/, see "layering violations".
1197 */
Chris Wrightb7300b72010-08-10 18:02:55 -07001198 if (!for_part) {
1199 ret = devcgroup_inode_permission(bdev->bd_inode, perm);
1200 if (ret != 0) {
1201 bdput(bdev);
1202 return ret;
1203 }
Al Viro82666022008-08-01 05:32:04 -04001204 }
Pavel Emelyanov7db9cfd2008-06-05 22:46:27 -07001205
NeilBrownd3374822009-01-09 08:31:10 +11001206 restart:
Tejun Heo0762b8b2008-08-25 19:56:12 +09001207
Tejun Heo89f97492008-11-05 10:21:06 +01001208 ret = -ENXIO;
Tejun Heocf771cb2008-09-03 09:01:09 +02001209 disk = get_gendisk(bdev->bd_dev, &partno);
Tejun Heo0762b8b2008-08-25 19:56:12 +09001210 if (!disk)
Arnd Bergmann6e9624b2010-08-07 18:25:34 +02001211 goto out;
Tejun Heo523e1d32011-10-19 14:31:07 +02001212 owner = disk->fops->owner;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213
Tejun Heo69e02c52011-03-09 19:54:27 +01001214 disk_block_events(disk);
NeilBrown6796bf52006-12-08 02:36:16 -08001215 mutex_lock_nested(&bdev->bd_mutex, for_part);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 if (!bdev->bd_openers) {
1217 bdev->bd_disk = disk;
Andi Kleen87192a22012-01-12 17:20:34 -08001218 bdev->bd_queue = disk->queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 bdev->bd_contains = bdev;
Dan Williams03cdadb2016-02-26 15:19:43 -08001220 if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
1221 bdev->bd_inode->i_flags = S_DAX;
1222 else
1223 bdev->bd_inode->i_flags = 0;
1224
Tejun Heocf771cb2008-09-03 09:01:09 +02001225 if (!partno) {
Tejun Heo89f97492008-11-05 10:21:06 +01001226 ret = -ENXIO;
1227 bdev->bd_part = disk_get_part(disk, partno);
1228 if (!bdev->bd_part)
1229 goto out_clear;
1230
Tejun Heo1196f8b2011-04-21 20:54:45 +02001231 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 if (disk->fops->open) {
Al Viro572c4892007-10-08 13:24:05 -04001233 ret = disk->fops->open(bdev, mode);
NeilBrownd3374822009-01-09 08:31:10 +11001234 if (ret == -ERESTARTSYS) {
1235 /* Lost a race with 'disk' being
1236 * deleted, try again.
1237 * See md.c
1238 */
1239 disk_put_part(bdev->bd_part);
1240 bdev->bd_part = NULL;
NeilBrownd3374822009-01-09 08:31:10 +11001241 bdev->bd_disk = NULL;
Andi Kleen87192a22012-01-12 17:20:34 -08001242 bdev->bd_queue = NULL;
NeilBrownd3374822009-01-09 08:31:10 +11001243 mutex_unlock(&bdev->bd_mutex);
Tejun Heo69e02c52011-03-09 19:54:27 +01001244 disk_unblock_events(disk);
Tejun Heo69e02c52011-03-09 19:54:27 +01001245 put_disk(disk);
Tejun Heo523e1d32011-10-19 14:31:07 +02001246 module_put(owner);
NeilBrownd3374822009-01-09 08:31:10 +11001247 goto restart;
1248 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 }
Tejun Heo7e697232011-05-23 13:26:07 +02001250
Dan Williams5a023cd2015-11-30 10:20:29 -08001251 if (!ret) {
Tejun Heo7e697232011-05-23 13:26:07 +02001252 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
Dan Williams5a023cd2015-11-30 10:20:29 -08001253 if (!blkdev_dax_capable(bdev))
1254 bdev->bd_inode->i_flags &= ~S_DAX;
1255 }
Tejun Heo7e697232011-05-23 13:26:07 +02001256
Tejun Heo1196f8b2011-04-21 20:54:45 +02001257 /*
1258 * If the device is invalidated, rescan partition
1259 * if open succeeded or failed with -ENOMEDIUM.
1260 * The latter is necessary to prevent ghost
1261 * partitions on a removed medium.
1262 */
		if (bdev->bd_invalidated) {
			if (!ret)
				rescan_partitions(disk, bdev);
			else if (ret == -ENOMEDIUM)
				invalidate_partitions(disk, bdev);
		}

		if (ret)
			goto out_clear;
	} else {
		struct block_device *whole;
		whole = bdget_disk(disk, 0);
		ret = -ENOMEM;
		if (!whole)
			goto out_clear;
		BUG_ON(for_part);
		ret = __blkdev_get(whole, mode, 1);
		if (ret)
			goto out_clear;
		bdev->bd_contains = whole;
		bdev->bd_part = disk_get_part(disk, partno);
		if (!(disk->flags & GENHD_FL_UP) ||
		    !bdev->bd_part || !bdev->bd_part->nr_sects) {
			ret = -ENXIO;
			goto out_clear;
		}
		bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
		if (!blkdev_dax_capable(bdev))
			bdev->bd_inode->i_flags &= ~S_DAX;
	}
	} else {
		if (bdev->bd_contains == bdev) {
			ret = 0;
			if (bdev->bd_disk->fops->open)
				ret = bdev->bd_disk->fops->open(bdev, mode);
			/* the same as first opener case, read comment there */
			if (bdev->bd_invalidated) {
				if (!ret)
					rescan_partitions(bdev->bd_disk, bdev);
				else if (ret == -ENOMEDIUM)
					invalidate_partitions(bdev->bd_disk, bdev);
			}
			if (ret)
				goto out_unlock_bdev;
		}
		/* only one opener holds refs to the module and disk */
		put_disk(disk);
		module_put(owner);
	}
	bdev->bd_openers++;
	if (for_part)
		bdev->bd_part_count++;
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);
	return 0;

 out_clear:
	disk_put_part(bdev->bd_part);
	bdev->bd_disk = NULL;
	bdev->bd_part = NULL;
	bdev->bd_queue = NULL;
	if (bdev != bdev->bd_contains)
		__blkdev_put(bdev->bd_contains, mode, 1);
	bdev->bd_contains = NULL;
 out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);
	put_disk(disk);
	module_put(owner);
 out:
	bdput(bdev);

	return ret;
}

/**
 * blkdev_get - open a block device
 * @bdev: block_device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
 * opened with exclusive access. Specifying %FMODE_EXCL with a %NULL
 * @holder is invalid. Exclusive opens may nest for the same @holder.
 *
 * On success, the reference count of @bdev is unchanged. On failure,
 * @bdev is put.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
{
	struct block_device *whole = NULL;
	int res;

	WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);

	if ((mode & FMODE_EXCL) && holder) {
		whole = bd_start_claiming(bdev, holder);
		if (IS_ERR(whole)) {
			bdput(bdev);
			return PTR_ERR(whole);
		}
	}

	res = __blkdev_get(bdev, mode, 0);

	if (whole) {
		struct gendisk *disk = whole->bd_disk;

		/* finish claiming */
		mutex_lock(&bdev->bd_mutex);
		spin_lock(&bdev_lock);

		if (!res) {
			BUG_ON(!bd_may_claim(bdev, whole, holder));
			/*
			 * Note that for a whole device bd_holders
			 * will be incremented twice, and bd_holder
			 * will be set to bd_may_claim before being
			 * set to holder
			 */
			whole->bd_holders++;
			whole->bd_holder = bd_may_claim;
			bdev->bd_holders++;
			bdev->bd_holder = holder;
		}

		/* tell others that we're done */
		BUG_ON(whole->bd_claiming != holder);
		whole->bd_claiming = NULL;
		wake_up_bit(&whole->bd_claiming, 0);

		spin_unlock(&bdev_lock);

		/*
		 * Block event polling for write claims if requested. Any
		 * write holder makes the write_holder state stick until
		 * all are released. This is good enough and tracking
		 * individual writeable references is too fragile given
		 * the way @mode is used in blkdev_get/put().
		 */
		if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			disk_block_events(disk);
		}

		mutex_unlock(&bdev->bd_mutex);
		bdput(whole);
	}

	return res;
}
EXPORT_SYMBOL(blkdev_get);

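/*
 * Illustrative sketch only (the function and holder below are invented,
 * not part of this file): a typical exclusive open paired with its
 * release. blkdev_put() must be called with the same @mode bits,
 * including FMODE_EXCL, that were passed to blkdev_get().
 *
 *	static int example_claim(struct block_device *bdev, void *holder)
 *	{
 *		int err;
 *
 *		err = blkdev_get(bdev, FMODE_READ | FMODE_EXCL, holder);
 *		if (err)
 *			return err;	(on failure @bdev was already put)
 *		...use the device...
 *		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 *		return 0;
 *	}
 */
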
/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path. @mode
 * and @holder are identical to blkdev_get().
 *
 * On success, the returned block_device has a reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return bdev;

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);

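/*
 * Sketch of a caller (the path and holder are examples, not taken from
 * this file). Filesystems typically claim their backing device this
 * way at mount time, using the file_system_type as the holder cookie:
 *
 *	bdev = blkdev_get_by_path("/dev/vda1",
 *				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *				  fs_type);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */
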
/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev. @mode and
 * @holder are identical to blkdev_get().
 *
 * Use it ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a
 * device number. _Never_ to be used for internal purposes. If you
 * ever need it - reconsider your API.
 *
 * On success, the returned block_device has a reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = bdget(dev);
	if (!bdev)
		return ERR_PTR(-ENOMEM);

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_dev);

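/*
 * Sketch only (the device number is invented): a non-exclusive open
 * from a bare dev_t, e.g. one parsed out of a module parameter. A
 * %NULL @holder is fine as long as %FMODE_EXCL is not requested:
 *
 *	bdev = blkdev_get_by_dev(MKDEV(8, 0), FMODE_READ, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 */
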
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binaries need it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = bd_acquire(inode);
	if (bdev == NULL)
		return -ENOMEM;

	filp->f_mapping = bdev->bd_inode->i_mapping;

	return blkdev_get(bdev, filp->f_mode, filp);
}

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		WARN_ON_ONCE(bdev->bd_holders);
		sync_blockdev(bdev);
		kill_bdev(bdev);

		bdev_write_inode(bdev);
		/*
		 * Detaching bdev inode from its wb in __destroy_inode()
		 * is too late: the queue which embeds its bdi (along with
		 * root wb) can be gone as soon as we put_disk() below.
		 */
		inode_detach_wb(bdev->bd_inode);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			disk->fops->release(disk, mode);
	}
	if (!bdev->bd_openers) {
		struct module *owner = disk->fops->owner;

		disk_put_part(bdev->bd_part);
		bdev->bd_part = NULL;
		bdev->bd_disk = NULL;
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;

		put_disk(disk);
		module_put(owner);
	}
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	if (victim)
		__blkdev_put(victim, mode, 1);
}

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	mutex_lock(&bdev->bd_mutex);

	if (mode & FMODE_EXCL) {
		bool bdev_free;

		/*
		 * Release a claim on the device. The holder fields
		 * are protected with bdev_lock. bd_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);

		/* bd_contains might point to self, check in a separate step */
		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!bdev->bd_contains->bd_holders)
			bdev->bd_contains->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove holder link and
		 * unblock event polling if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(bdev->bd_disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event. This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);

	mutex_unlock(&bdev->bd_mutex);

	__blkdev_put(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_put);

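/*
 * Usage note (illustrative): the claim release above only runs when
 * @mode carries FMODE_EXCL, so an exclusive open must be released with
 * the same flag or the claim leaks:
 *
 *	blkdev_get(bdev, FMODE_WRITE | FMODE_EXCL, holder);
 *	...
 *	blkdev_put(bdev, FMODE_WRITE | FMODE_EXCL);
 */
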
static int blkdev_close(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(filp));

	blkdev_put(bdev, filp->f_mode);
	return 0;
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	fmode_t mode = file->f_mode;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	return blkdev_ioctl(bdev, mode, cmd, arg);
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver, which is basically a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general-purpose
 * use.
 */
ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	struct blk_plug plug;
	ssize_t ret;

	if (bdev_read_only(I_BDEV(bd_inode)))
		return -EPERM;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	iov_iter_truncate(from, size - iocb->ki_pos);

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_write_iter);

ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	loff_t pos = iocb->ki_pos;

	if (pos >= size)
		return 0;

	size -= pos;
	iov_iter_truncate(to, size);
	return generic_file_read_iter(iocb, to);
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);

/*
 * Try to release a page associated with a block device when the system
 * is under memory pressure.
 */
static int blkdev_releasepage(struct page *page, gfp_t wait)
{
	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;

	if (super && super->s_op->bdev_try_to_free_page)
		return super->s_op->bdev_try_to_free_page(super, page, wait);

	return try_to_free_buffers(page);
}

static int blkdev_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	if (dax_mapping(mapping)) {
		struct block_device *bdev = I_BDEV(mapping->host);

		return dax_writeback_mapping_range(mapping, bdev, wbc);
	}
	return generic_writepages(mapping, wbc);
}

static const struct address_space_operations def_blk_aops = {
	.readpage = blkdev_readpage,
	.readpages = blkdev_readpages,
	.writepage = blkdev_writepage,
	.write_begin = blkdev_write_begin,
	.write_end = blkdev_write_end,
	.writepages = blkdev_writepages,
	.releasepage = blkdev_releasepage,
	.direct_IO = blkdev_direct_IO,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

#ifdef CONFIG_FS_DAX
/*
 * In the raw block case we do not need to contend with truncation or
 * unwritten file extents. Without those concerns there is no need for
 * additional locking beyond the mmap_sem context that these routines
 * are already executing under.
 *
 * Note, there is no protection if the block device is dynamically
 * resized (partition grow/shrink) during a fault. A stable block device
 * size is already not enforced in the blkdev_direct_IO path.
 *
 * For DAX, it is the responsibility of the block device driver to
 * ensure the whole-disk device size is stable while requests are in
 * flight.
 *
 * Finally, unlike the filemap_page_mkwrite() case there is no
 * filesystem superblock to sync against freezing. We still include a
 * pfn_mkwrite callback for dax drivers to receive write fault
 * notifications.
 */
static int blkdev_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return __dax_fault(vma, vmf, blkdev_get_block);
}

static int blkdev_dax_pfn_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	return dax_pfn_mkwrite(vma, vmf);
}

static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
				pmd_t *pmd, unsigned int flags)
{
	return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block);
}

static const struct vm_operations_struct blkdev_dax_vm_ops = {
	.fault = blkdev_dax_fault,
	.pmd_fault = blkdev_dax_pmd_fault,
	.pfn_mkwrite = blkdev_dax_pfn_mkwrite,
};

static const struct vm_operations_struct blkdev_default_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
};

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	file_accessed(file);
	if (IS_DAX(bd_inode)) {
		vma->vm_ops = &blkdev_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &blkdev_default_vm_ops;
	}

	return 0;
}
#else
#define blkdev_mmap generic_file_mmap
#endif

const struct file_operations def_blk_fops = {
	.open = blkdev_open,
	.release = blkdev_close,
	.llseek = block_llseek,
	.read_iter = blkdev_read_iter,
	.write_iter = blkdev_write_iter,
	.mmap = blkdev_mmap,
	.fsync = blkdev_fsync,
	.unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_blkdev_ioctl,
#endif
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};

int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
	int res;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	res = blkdev_ioctl(bdev, 0, cmd, arg);
	set_fs(old_fs);
	return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);

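/*
 * Sketch of a caller (the surrounding code is invented): drivers that
 * sit on top of another block device use this to issue ioctls from
 * kernel context, e.g. asking the lower device to re-read its
 * partition table:
 *
 *	res = ioctl_by_bdev(bdev, BLKRRPART, 0);
 */
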
/**
 * lookup_bdev - lookup a struct block_device by name
 * @pathname: special file representing the block device
 *
 * Get a reference to the block device at @pathname in the current
 * namespace if possible and return it. Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *pathname)
{
	struct block_device *bdev;
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return ERR_PTR(-EINVAL);

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return ERR_PTR(error);

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (path.mnt->mnt_flags & MNT_NODEV)
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode);
	if (!bdev)
		goto fail;
out:
	path_put(&path);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}
EXPORT_SYMBOL(lookup_bdev);

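/*
 * Sketch of a caller (the path is an example): on success the caller
 * owns the reference obtained via bd_acquire() and must drop it with
 * bdput() when done:
 *
 *	bdev = lookup_bdev("/dev/loop0");
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	bdput(bdev);
 */
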
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we may
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		func(I_BDEV(inode), arg);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}
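
/*
 * Sketch of a caller (the callback below is invented for illustration):
 * sync(2)-style code uses this to kick writeback on every open block
 * device, e.g.:
 *
 *	static void example_write_one_bdev(struct block_device *bdev, void *arg)
 *	{
 *		filemap_fdatawrite(bdev->bd_inode->i_mapping);
 *	}
 *
 *	iterate_bdevs(example_write_one_bdev, NULL);
 */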