/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/dax.h>
#include <linux/badblocks.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

void __vfs_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sVFS (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					    "for block device %s (err=%d).\n",
					    bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not.. */
void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(kill_bdev);

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	lru_add_drain_all();	/* make sure all lru add caches are flushed */
	invalidate_mapping_pages(mapping, 0, -1);
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (bdev->bd_block_size != size) {
		sync_blockdev(bdev);
		bdev->bd_block_size = size;
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
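
/*
 * Illustrative use (added commentary, not part of the original file):
 * a filesystem's fill_super callback typically picks its block size via
 * the helper above.  "foo_fill_super" and the 1024-byte on-disk block
 * size are assumptions for the sketch, not kernel API:
 *
 *	static int foo_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		if (!sb_min_blocksize(sb, 1024))
 *			return -EINVAL;		(device can't support it)
 *		...
 *	}
 */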

static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	wake_up_process(waiter);
}

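/*
 * Note (added commentary): the fast path below builds a single bio with
 * the bio and its bio_vecs on the caller's stack, stores the submitting
 * task in bi_private, and sleeps until blkdev_bio_end_io_simple() clears
 * bi_private and wakes it; with IOCB_HIPRI it spins in blk_mq_poll()
 * instead of scheduling away.
 */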
static ssize_t
__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
		int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	unsigned blkbits = blksize_bits(bdev_logical_block_size(bdev));
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *bvec;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;
	blk_qc_t qc;
	int i;

	if ((pos | iov_iter_alignment(iter)) & ((1 << blkbits) - 1))
		return -EINVAL;

	bio_init(&bio);
	bio.bi_max_vecs = nr_pages;
	bio.bi_io_vec = inline_vecs;
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = pos >> blkbits;
	bio.bi_private = current;
	bio.bi_end_io = blkdev_bio_end_io_simple;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		return ret;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == READ) {
		bio_set_op_attrs(&bio, REQ_OP_READ, 0);
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio_set_op_attrs(&bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
		task_io_account_write(ret);
	}

	qc = submit_bio(&bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_mq_poll(bdev_get_queue(bdev), qc))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	bio_for_each_segment_all(bvec, &bio, i) {
		if (should_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}

	if (unlikely(bio.bi_error))
		return bio.bi_error;
	iocb->ki_pos += ret;
	return ret;
}

static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = bdev_file_inode(file);
	int nr_pages;

	nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
	if (!nr_pages)
		return 0;
	if (is_sync_kiocb(iocb) && nr_pages <= DIO_INLINE_BIO_VECS)
		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
	return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter,
				    blkdev_get_block, NULL, NULL,
				    DIO_SKIP_DIO_COUNT);
}

int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when
 * multiple freeze requests arrive simultaneously.  It counts up in
 * freeze_bdev() and counts down in thaw_bdev(); only when it reaches 0
 * does thaw_bdev() actually unfreeze.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1) {
		/*
		 * We don't even need to grab a reference - the first call
		 * to freeze_bdev grabs an active reference and only the last
		 * thaw_bdev drops it.
		 */
		sb = get_super(bdev);
		if (sb)
			drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}

	sb = get_active_super(bdev);
	if (!sb)
		goto out;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	if (error) {
		deactivate_super(sb);
		bdev->bd_fsfreeze_count--;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return ERR_PTR(error);
	}
	deactivate_super(sb);
 out:
	sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return sb;	/* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
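
/*
 * Illustrative pairing (added commentary, not kernel code): a snapshot
 * driver would bracket its work with the two calls above.  "make_snapshot"
 * is a hypothetical helper:
 *
 *	sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	err = make_snapshot(bdev);	(hypothetical)
 *	thaw_bdev(bdev, sb);
 */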

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_readpages(struct file *file, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = bdev_file_inode(filp);
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = filemap_write_and_wait_range(filp->f_mapping, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page to (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_queue, false);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
	blk_queue_exit(bdev->bd_queue);
	return result;
}
EXPORT_SYMBOL_GPL(bdev_read_page);
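
/*
 * Illustrative caller pattern (added commentary): because failures here
 * are "soft", users such as the swap I/O path try rw_page first and fall
 * back to submitting a regular bio on error, roughly:
 *
 *	if (!bdev_read_page(bdev, sector, page))
 *		return 0;		(page read submitted)
 *	... allocate and submit a normal bio instead ...
 */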

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_queue, false);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
	if (result)
		end_page_writeback(page);
	else
		unlock_page(page);
	blk_queue_exit(bdev->bd_queue);
	return result;
}
EXPORT_SYMBOL_GPL(bdev_write_page);

/**
 * bdev_direct_access() - Get the address for directly-accessible memory
 * @bdev: The device containing the memory
 * @dax: control and output parameters for ->direct_access
 *
 * If a block device is made up of directly addressable memory, this function
 * will tell the caller the PFN and the address of the memory.  The address
 * may be directly dereferenced within the kernel without the need to call
 * ioremap(), kmap() or similar.  The PFN is suitable for inserting into
 * page tables.
 *
 * Return: negative errno if an error occurs, otherwise the number of bytes
 * accessible at this address.
 */
long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	sector_t sector = dax->sector;
	long avail, size = dax->size;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	/*
	 * The device driver is allowed to sleep, in order to make the
	 * memory directly accessible.
	 */
	might_sleep();

	if (size < 0)
		return size;
	if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
		return -EOPNOTSUPP;
	if ((sector + DIV_ROUND_UP(size, 512)) >
					part_nr_sects_read(bdev->bd_part))
		return -ERANGE;
	sector += get_start_sect(bdev);
	if (sector % (PAGE_SIZE / 512))
		return -EINVAL;
	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
	if (!avail)
		return -ERANGE;
	if (avail > 0 && avail & ~PAGE_MASK)
		return -ENXIO;
	return min(avail, size);
}
EXPORT_SYMBOL_GPL(bdev_direct_access);
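
/*
 * Illustrative call (added commentary): callers fill sector/size in a
 * struct blk_dax_ctl and read addr/pfn back out, e.g.
 *
 *	struct blk_dax_ctl dax = { .sector = 0, .size = PAGE_SIZE };
 *
 *	if (bdev_direct_access(bdev, &dax) < 0)
 *		... not directly addressable ...
 *	else
 *		... dax.addr and dax.pfn are now valid ...
 *
 * bdev_dax_supported() below uses exactly this pattern.
 */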

/**
 * bdev_dax_supported() - Check if the device supports dax for filesystem
 * @sb: The superblock of the device
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: negative errno if unsupported, 0 if supported.
 */
int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	struct blk_dax_ctl dax = {
		.sector = 0,
		.size = PAGE_SIZE,
	};
	int err;

	if (blocksize != PAGE_SIZE) {
		vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
		return -EINVAL;
	}

	err = bdev_direct_access(sb->s_bdev, &dax);
	if (err < 0) {
		switch (err) {
		case -EOPNOTSUPP:
			vfs_msg(sb, KERN_ERR,
				"error: device does not support dax");
			break;
		case -EINVAL:
			vfs_msg(sb, KERN_ERR,
				"error: unaligned partition for dax");
			break;
		default:
			vfs_msg(sb, KERN_ERR,
				"error: dax access failed (%d)", err);
		}
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(bdev_dax_supported);
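
/*
 * Illustrative use (added commentary): a filesystem handling a "dax"
 * mount option would typically call
 *
 *	err = bdev_dax_supported(sb, sb->s_blocksize);
 *
 * from its fill_super path and refuse the option when err is non-zero.
 */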

/**
 * bdev_dax_capable() - Return if the raw device is capable for dax
 * @bdev: The device for raw block device access
 */
bool bdev_dax_capable(struct block_device *bdev)
{
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
	};

	if (!IS_ENABLED(CONFIG_FS_DAX))
		return false;

	dax.sector = 0;
	if (bdev_direct_access(bdev, &dax) < 0)
		return false;

	dax.sector = bdev->bd_part->nr_sects - (PAGE_SIZE / 512);
	if (bdev_direct_access(bdev, &dax) < 0)
		return false;

	return true;
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct bdev_inode *bdi = BDEV_I(inode);

	kmem_cache_free(bdev_cachep, bdi);
}

static void bdev_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, bdev_i_callback);
}

static void init_once(void *foo)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	memset(bdev, 0, sizeof(*bdev));
	mutex_init(&bdev->bd_mutex);
	INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	inode_init_once(&ei->vfs_inode);
	/* Initialize mutex for freeze. */
	mutex_init(&bdev->bd_fsfreeze_mutex);
}

static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	spin_lock(&bdev_lock);
	list_del_init(&bdev->bd_list);
	spin_unlock(&bdev_lock);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static struct dentry *bd_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	struct dentry *dent;
	dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
	if (!IS_ERR(dent))
		dent->d_sb->s_iflags |= SB_I_CGROUPWB;
	return dent;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.mount		= bd_mount,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}
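
/*
 * Worked example (added commentary): the sum of major and minor collides
 * easily - hash(MKDEV(8, 1)) and hash(MKDEV(1, 8)) are both 9 - which is
 * what the "very bad" caveat above refers to; collisions only cost extra
 * comparisons in the icache hash chain.
 */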

static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(blockdev_superblock, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_super = NULL;
		bdev->bd_inode = inode;
		bdev->bd_block_size = (1 << inode->i_blkbits);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

EXPORT_SYMBOL(bdget);

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	ihold(bdev->bd_inode);
	return bdev;
}
EXPORT_SYMBOL(bdgrab);

long nr_blockdev_pages(void)
{
	struct block_device *bdev;
	long ret = 0;
	spin_lock(&bdev_lock);
	list_for_each_entry(bdev, &all_bdevs, bd_list) {
		ret += bdev->bd_inode->i_mapping->nrpages;
	}
	spin_unlock(&bdev_lock);
	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev) {
		bdgrab(bdev);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional reference to bd_inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			bdgrab(bdev);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

/* Call when you free inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (!sb_is_blkdev_sb(inode->i_sb))
		bdev = inode->i_bdev;
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
	spin_unlock(&bdev_lock);

	if (bdev)
		bdput(bdev);
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (bdev->bd_contains == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - prepare to claim a block device
 * @bdev: block device of interest
 * @whole: the whole device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Prepare to claim @bdev.  This function fails if @bdev is already
 * claimed by another holder and waits if another claiming is in
 * progress.  This function doesn't actually claim.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).  Might release bdev_lock, sleep and regrab
 * it multiple times.
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
static int bd_prepare_to_claim(struct block_device *bdev,
			       struct block_device *whole, void *holder)
{
retry:
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder))
		return -EBUSY;

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		spin_lock(&bdev_lock);
		goto retry;
	}

	/* yay, all mine */
	return 0;
}

/**
 * bd_start_claiming - start claiming a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * @bdev is about to be opened exclusively.  Check @bdev can be opened
 * exclusively and mark that an exclusive open is in progress.  Each
 * successful call to this function must be matched with a call to
 * either bd_finish_claiming() or bd_abort_claiming() (which do not
 * fail).
 *
 * This function is used to gain exclusive access to the block device
 * without actually causing other exclusive open attempts to fail.  It
 * should be used when the open sequence itself requires exclusive
 * access but may subsequently fail.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to the block device containing @bdev on success, ERR_PTR()
 * value on failure.
 */
static struct block_device *bd_start_claiming(struct block_device *bdev,
					      void *holder)
{
	struct gendisk *disk;
	struct block_device *whole;
	int partno, err;

	might_sleep();

	/*
	 * @bdev might not have been initialized properly yet, look up
	 * and grab the outer block device the hard way.
	 */
	disk = get_gendisk(bdev->bd_dev, &partno);
	if (!disk)
		return ERR_PTR(-ENXIO);

	/*
	 * Normally, @bdev should equal what's returned from bdget_disk()
	 * if partno is 0; however, some drivers (floppy) use multiple
	 * bdev's for the same physical device and @bdev may be one of the
	 * aliases.  Keep @bdev if partno is 0.  This means claimer
	 * tracking is broken for those devices but it has always been that
	 * way.
	 */
	if (partno)
		whole = bdget_disk(disk, 0);
	else
		whole = bdgrab(bdev);

	module_put(disk->fops->owner);
	put_disk(disk);
	if (!whole)
		return ERR_PTR(-ENOMEM);

	/* prepare to claim, if successful, mark claiming in progress */
	spin_lock(&bdev_lock);

	err = bd_prepare_to_claim(bdev, whole, holder);
	if (err == 0) {
		whole->bd_claiming = holder;
		spin_unlock(&bdev_lock);
		return whole;
	} else {
		spin_unlock(&bdev_lock);
		bdput(whole);
		return ERR_PTR(err);
	}
}
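
/*
 * Claiming protocol in brief (added commentary): an exclusive open does
 *
 *	whole = bd_start_claiming(bdev, holder);
 *	... open sequence that may fail ...
 *	then either bd_finish_claiming(bdev, whole, holder)
 *	or bd_abort_claiming(bdev, whole, holder)
 *
 * bd_finish_claiming()/bd_abort_claiming() are defined later in this
 * file.
 */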

#ifdef CONFIG_SYSFS
struct bd_holder_disk {
	struct list_head	list;
	struct gendisk		*disk;
	int			refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
						  struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
		if (holder->disk == disk)
			return holder;
	return NULL;
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	sysfs_remove_link(from, kobject_name(to));
}

/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;
	int ret = 0;

	mutex_lock(&bdev->bd_mutex);

	WARN_ON_ONCE(!bdev->bd_holder);

	/* FIXME: remove the following once add_disk() handles errors */
	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
		goto out_unlock;

	holder = bd_find_holder_disk(bdev, disk);
	if (holder) {
		holder->refcnt++;
		goto out_unlock;
	}

	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;

	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
	if (ret)
		goto out_free;

	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
	if (ret)
		goto out_del;
	/*
	 * bdev could be deleted beneath us which would implicitly destroy
	 * the holder directory.  Hold on to it.
	 */
	kobject_get(bdev->bd_part->holder_dir);

	list_add(&holder->list, &bdev->bd_holder_disks);
	goto out_unlock;

out_del:
	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
out_free:
	kfree(holder);
out_unlock:
	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);

/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	mutex_lock(&bdev->bd_mutex);

	holder = bd_find_holder_disk(bdev, disk);

	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
		del_symlink(bdev->bd_part->holder_dir,
			    &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_part->holder_dir);
		list_del_init(&holder->list);
		kfree(holder);
	}

	mutex_unlock(&bdev->bd_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif

/**
 * flush_disk - invalidates all buffer-cache entries on a disk
 *
 * @bdev:	struct block device to be flushed
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Invalidates all buffer-cache entries on a disk.  It should be called
 * when a disk has been changed -- either by a media change or online
 * resize.
 */
static void flush_disk(struct block_device *bdev, bool kill_dirty)
{
	if (__invalidate_device(bdev, kill_dirty)) {
		printk(KERN_WARNING "VFS: busy inodes on changed media or "
		       "resized disk %s\n",
		       bdev->bd_disk ? bdev->bd_disk->disk_name : "");
	}

	if (!bdev->bd_disk)
		return;
	if (disk_part_scan_enabled(bdev->bd_disk))
		bdev->bd_invalidated = 1;
}

/**
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @bdev: struct bdev to adjust.
 *
 * This routine checks to see if the bdev size does not match the disk size
 * and adjusts it if it differs.
 */
void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
{
	loff_t disk_size, bdev_size;

	disk_size = (loff_t)get_capacity(disk) << 9;
	bdev_size = i_size_read(bdev->bd_inode);
	if (disk_size != bdev_size) {
		printk(KERN_INFO
		       "%s: detected capacity change from %lld to %lld\n",
		       disk->disk_name, bdev_size, disk_size);
		i_size_write(bdev->bd_inode, disk_size);
		flush_disk(bdev, false);
	}
}
EXPORT_SYMBOL(check_disk_size_change);

/**
 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
 * @disk: struct gendisk to be revalidated
 *
 * This routine is a wrapper for lower-level driver's revalidate_disk
 * call-backs.  It is used to do common pre and post operations needed
 * for all revalidate_disk operations.
 */
int revalidate_disk(struct gendisk *disk)
{
	struct block_device *bdev;
	int ret = 0;

	if (disk->fops->revalidate_disk)
		ret = disk->fops->revalidate_disk(disk);
	blk_integrity_revalidate(disk);
	bdev = bdget_disk(disk, 0);
	if (!bdev)
		return ret;

	mutex_lock(&bdev->bd_mutex);
	check_disk_size_change(disk, bdev);
	bdev->bd_invalidated = 0;
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
EXPORT_SYMBOL(revalidate_disk);

/*
 * This routine checks whether a removable medium has been changed,
 * and invalidates all buffer-cache-entries in that case.  This
 * is a relatively slow routine, so we have to try to minimize using
 * it.  Thus it is called only upon a 'mount' or 'open'.  This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	const struct block_device_operations *bdops = disk->fops;
	unsigned int events;

	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
				   DISK_EVENT_EJECT_REQUEST);
	if (!(events & DISK_EVENT_MEDIA_CHANGE))
		return 0;

	flush_disk(bdev, true);
	if (bdops->revalidate_disk)
		bdops->revalidate_disk(bdev->bd_disk);
	return 1;
}

EXPORT_SYMBOL(check_disk_change);

void bd_set_size(struct block_device *bdev, loff_t size)
{
	unsigned bsize = bdev_logical_block_size(bdev);

	inode_lock(bdev->bd_inode);
	i_size_write(bdev->bd_inode, size);
	inode_unlock(bdev->bd_inode);
	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_block_size = bsize;
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
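
/*
 * Worked example (added commentary): the loop above picks the largest
 * power-of-two block size <= PAGE_SIZE that evenly divides the device
 * size, starting from the logical block size.  With a 512-byte logical
 * block size, size = 1536 stops at bsize = 512 (1536 & 512 != 0), while
 * size = 12288 doubles all the way up to bsize = 4096 on a 4K-page
 * system.
 */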
1306
Al Viro4385bab2013-05-05 22:11:03 -04001307static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
NeilBrown37be4122006-12-08 02:36:16 -08001308
Peter Zijlstra6d740cd2007-02-20 13:58:18 -08001309/*
1310 * bd_mutex locking:
1311 *
1312 * mutex_lock(part->bd_mutex)
1313 * mutex_lock_nested(whole->bd_mutex, 1)
1314 */
1315
Al Viro572c4892007-10-08 13:24:05 -04001316static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 struct gendisk *disk;
Tejun Heo523e1d32011-10-19 14:31:07 +02001319 struct module *owner;
Pavel Emelyanov7db9cfd2008-06-05 22:46:27 -07001320 int ret;
Tejun Heocf771cb2008-09-03 09:01:09 +02001321 int partno;
Al Virofe6e9c12008-06-23 08:30:55 -04001322 int perm = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323
Al Viro572c4892007-10-08 13:24:05 -04001324 if (mode & FMODE_READ)
Al Virofe6e9c12008-06-23 08:30:55 -04001325 perm |= MAY_READ;
Al Viro572c4892007-10-08 13:24:05 -04001326 if (mode & FMODE_WRITE)
Al Virofe6e9c12008-06-23 08:30:55 -04001327 perm |= MAY_WRITE;
1328 /*
1329 * hooks: /n/, see "layering violations".
1330 */
Chris Wrightb7300b72010-08-10 18:02:55 -07001331 if (!for_part) {
1332 ret = devcgroup_inode_permission(bdev->bd_inode, perm);
1333 if (ret != 0) {
1334 bdput(bdev);
1335 return ret;
1336 }
Al Viro82666022008-08-01 05:32:04 -04001337 }
Pavel Emelyanov7db9cfd2008-06-05 22:46:27 -07001338
NeilBrownd3374822009-01-09 08:31:10 +11001339 restart:
Tejun Heo0762b8b2008-08-25 19:56:12 +09001340
Tejun Heo89f97492008-11-05 10:21:06 +01001341 ret = -ENXIO;
Tejun Heocf771cb2008-09-03 09:01:09 +02001342 disk = get_gendisk(bdev->bd_dev, &partno);
Tejun Heo0762b8b2008-08-25 19:56:12 +09001343 if (!disk)
Arnd Bergmann6e9624b2010-08-07 18:25:34 +02001344 goto out;
Tejun Heo523e1d32011-10-19 14:31:07 +02001345 owner = disk->fops->owner;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
Tejun Heo69e02c52011-03-09 19:54:27 +01001347 disk_block_events(disk);
NeilBrown6796bf52006-12-08 02:36:16 -08001348 mutex_lock_nested(&bdev->bd_mutex, for_part);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 if (!bdev->bd_openers) {
1350 bdev->bd_disk = disk;
Andi Kleen87192a22012-01-12 17:20:34 -08001351 bdev->bd_queue = disk->queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 bdev->bd_contains = bdev;
Dan Williams03cdadb2016-02-26 15:19:43 -08001353
Tejun Heocf771cb2008-09-03 09:01:09 +02001354 if (!partno) {
Tejun Heo89f97492008-11-05 10:21:06 +01001355 ret = -ENXIO;
1356 bdev->bd_part = disk_get_part(disk, partno);
1357 if (!bdev->bd_part)
1358 goto out_clear;
1359
Tejun Heo1196f8b2011-04-21 20:54:45 +02001360 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 if (disk->fops->open) {
Al Viro572c4892007-10-08 13:24:05 -04001362 ret = disk->fops->open(bdev, mode);
NeilBrownd3374822009-01-09 08:31:10 +11001363 if (ret == -ERESTARTSYS) {
1364 /* Lost a race with 'disk' being
1365 * deleted, try again.
1366 * See md.c
1367 */
1368 disk_put_part(bdev->bd_part);
1369 bdev->bd_part = NULL;
NeilBrownd3374822009-01-09 08:31:10 +11001370 bdev->bd_disk = NULL;
Andi Kleen87192a22012-01-12 17:20:34 -08001371 bdev->bd_queue = NULL;
NeilBrownd3374822009-01-09 08:31:10 +11001372 mutex_unlock(&bdev->bd_mutex);
Tejun Heo69e02c52011-03-09 19:54:27 +01001373 disk_unblock_events(disk);
Tejun Heo69e02c52011-03-09 19:54:27 +01001374 put_disk(disk);
Tejun Heo523e1d32011-10-19 14:31:07 +02001375 module_put(owner);
NeilBrownd3374822009-01-09 08:31:10 +11001376 goto restart;
1377 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 }
Tejun Heo7e697232011-05-23 13:26:07 +02001379
Christoph Hellwig22375702016-09-14 11:56:13 +02001380 if (!ret)
Tejun Heo7e697232011-05-23 13:26:07 +02001381 bd_set_size(bdev, (loff_t)get_capacity(disk) << 9);
Tejun Heo7e697232011-05-23 13:26:07 +02001382
Tejun Heo1196f8b2011-04-21 20:54:45 +02001383 /*
1384 * If the device is invalidated, rescan partition
1385 * if open succeeded or failed with -ENOMEDIUM.
1386 * The latter is necessary to prevent ghost
1387 * partitions on a removed medium.
1388 */
Jun'ichi Nomurafe316bf2012-03-02 10:38:33 +01001389 if (bdev->bd_invalidated) {
1390 if (!ret)
1391 rescan_partitions(disk, bdev);
1392 else if (ret == -ENOMEDIUM)
1393 invalidate_partitions(disk, bdev);
1394 }
Dan Williams5a023cd2015-11-30 10:20:29 -08001395
Tejun Heo1196f8b2011-04-21 20:54:45 +02001396 if (ret)
1397 goto out_clear;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 struct block_device *whole;
1400 whole = bdget_disk(disk, 0);
1401 ret = -ENOMEM;
1402 if (!whole)
Tejun Heo0762b8b2008-08-25 19:56:12 +09001403 goto out_clear;
NeilBrown37be4122006-12-08 02:36:16 -08001404 BUG_ON(for_part);
Al Viro572c4892007-10-08 13:24:05 -04001405 ret = __blkdev_get(whole, mode, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 if (ret)
Tejun Heo0762b8b2008-08-25 19:56:12 +09001407 goto out_clear;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 bdev->bd_contains = whole;
Tejun Heo89f97492008-11-05 10:21:06 +01001409 bdev->bd_part = disk_get_part(disk, partno);
Tejun Heoe71bf0d2008-09-03 09:03:02 +02001410 if (!(disk->flags & GENHD_FL_UP) ||
Tejun Heo89f97492008-11-05 10:21:06 +01001411 !bdev->bd_part || !bdev->bd_part->nr_sects) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 ret = -ENXIO;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001413 goto out_clear;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 }
Tejun Heo89f97492008-11-05 10:21:06 +01001415 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 }
1417 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 if (bdev->bd_contains == bdev) {
Tejun Heo1196f8b2011-04-21 20:54:45 +02001419 ret = 0;
1420 if (bdev->bd_disk->fops->open)
Al Viro572c4892007-10-08 13:24:05 -04001421 ret = bdev->bd_disk->fops->open(bdev, mode);
Tejun Heo1196f8b2011-04-21 20:54:45 +02001422 /* same as the first-opener case; see the comment there */
Jun'ichi Nomurafe316bf2012-03-02 10:38:33 +01001423 if (bdev->bd_invalidated) {
1424 if (!ret)
1425 rescan_partitions(bdev->bd_disk, bdev);
1426 else if (ret == -ENOMEDIUM)
1427 invalidate_partitions(bdev->bd_disk, bdev);
1428 }
Tejun Heo1196f8b2011-04-21 20:54:45 +02001429 if (ret)
1430 goto out_unlock_bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 }
Tejun Heo69e02c52011-03-09 19:54:27 +01001432 /* only one opener holds refs to the module and disk */
Tejun Heo69e02c52011-03-09 19:54:27 +01001433 put_disk(disk);
Tejun Heo523e1d32011-10-19 14:31:07 +02001434 module_put(owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 }
1436 bdev->bd_openers++;
NeilBrown37be4122006-12-08 02:36:16 -08001437 if (for_part)
1438 bdev->bd_part_count++;
Arjan van de Venc039e312006-03-23 03:00:28 -08001439 mutex_unlock(&bdev->bd_mutex);
Tejun Heo69e02c52011-03-09 19:54:27 +01001440 disk_unblock_events(disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 return 0;
1442
Tejun Heo0762b8b2008-08-25 19:56:12 +09001443 out_clear:
Tejun Heo89f97492008-11-05 10:21:06 +01001444 disk_put_part(bdev->bd_part);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 bdev->bd_disk = NULL;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001446 bdev->bd_part = NULL;
Andi Kleen87192a22012-01-12 17:20:34 -08001447 bdev->bd_queue = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 if (bdev != bdev->bd_contains)
Al Viro572c4892007-10-08 13:24:05 -04001449 __blkdev_put(bdev->bd_contains, mode, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 bdev->bd_contains = NULL;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001451 out_unlock_bdev:
Arjan van de Venc039e312006-03-23 03:00:28 -08001452 mutex_unlock(&bdev->bd_mutex);
Tejun Heo69e02c52011-03-09 19:54:27 +01001453 disk_unblock_events(disk);
Tejun Heo0762b8b2008-08-25 19:56:12 +09001454 put_disk(disk);
Tejun Heo523e1d32011-10-19 14:31:07 +02001455 module_put(owner);
Dan Carpenter4345cab2011-03-19 13:53:31 +01001456 out:
Tejun Heo0762b8b2008-08-25 19:56:12 +09001457 bdput(bdev);
1458
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 return ret;
1460}
1461
Tejun Heod4d77622010-11-13 11:55:18 +01001462/**
1463 * blkdev_get - open a block device
1464 * @bdev: block_device to open
1465 * @mode: FMODE_* mask
1466 * @holder: exclusive holder identifier
1467 *
1468 * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
1469 * open with exclusive access. Specifying %FMODE_EXCL with %NULL
1470 * @holder is invalid. Exclusive opens may nest for the same @holder.
1471 *
1472 * On success, the reference count of @bdev is unchanged. On failure,
1473 * @bdev is put.
1474 *
1475 * CONTEXT:
1476 * Might sleep.
1477 *
1478 * RETURNS:
1479 * 0 on success, -errno on failure.
1480 */
Tejun Heoe525fd82010-11-13 11:55:17 +01001481int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482{
Tejun Heoe525fd82010-11-13 11:55:17 +01001483 struct block_device *whole = NULL;
1484 int res;
1485
1486 WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
1487
1488 if ((mode & FMODE_EXCL) && holder) {
1489 whole = bd_start_claiming(bdev, holder);
1490 if (IS_ERR(whole)) {
1491 bdput(bdev);
1492 return PTR_ERR(whole);
1493 }
1494 }
1495
1496 res = __blkdev_get(bdev, mode, 0);
1497
1498 if (whole) {
Tejun Heod4dc2102011-04-21 20:54:46 +02001499 struct gendisk *disk = whole->bd_disk;
1500
Tejun Heo6a027ef2010-11-13 11:55:17 +01001501 /* finish claiming */
Tejun Heo77ea8872010-12-08 20:57:37 +01001502 mutex_lock(&bdev->bd_mutex);
Tejun Heo6a027ef2010-11-13 11:55:17 +01001503 spin_lock(&bdev_lock);
1504
Tejun Heo77ea8872010-12-08 20:57:37 +01001505 if (!res) {
Tejun Heo6a027ef2010-11-13 11:55:17 +01001506 BUG_ON(!bd_may_claim(bdev, whole, holder));
1507 /*
1508 * Note that for a whole device bd_holders
1509 * will be incremented twice, and bd_holder
1510 * will be set to bd_may_claim before being
1511 * set to holder
1512 */
1513 whole->bd_holders++;
1514 whole->bd_holder = bd_may_claim;
1515 bdev->bd_holders++;
1516 bdev->bd_holder = holder;
1517 }
1518
1519 /* tell others that we're done */
1520 BUG_ON(whole->bd_claiming != holder);
1521 whole->bd_claiming = NULL;
1522 wake_up_bit(&whole->bd_claiming, 0);
1523
1524 spin_unlock(&bdev_lock);
Tejun Heo77ea8872010-12-08 20:57:37 +01001525
1526 /*
Tejun Heod4dc2102011-04-21 20:54:46 +02001527 * Block event polling for write claims if requested. Any
1528 * write holder makes the write_holder state stick until
1529 * all are released. This is good enough; tracking
1530 * individual writeable references would be too fragile given the
1531 * way @mode is used in blkdev_get/put().
Tejun Heo77ea8872010-12-08 20:57:37 +01001532 */
Tejun Heo4c49ff32011-06-01 08:27:41 +02001533 if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
1534 (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
Tejun Heo77ea8872010-12-08 20:57:37 +01001535 bdev->bd_write_holder = true;
Tejun Heod4dc2102011-04-21 20:54:46 +02001536 disk_block_events(disk);
Tejun Heo77ea8872010-12-08 20:57:37 +01001537 }
1538
1539 mutex_unlock(&bdev->bd_mutex);
Tejun Heo6a027ef2010-11-13 11:55:17 +01001540 bdput(whole);
Tejun Heoe525fd82010-11-13 11:55:17 +01001541 }
1542
1543 return res;
NeilBrown37be4122006-12-08 02:36:16 -08001544}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545EXPORT_SYMBOL(blkdev_get);
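
/*
 * Usage sketch (abridged): blkdev_get() consumes the caller's
 * reference on @bdev even on failure, which is why blkdev_open()
 * below can return its result directly:
 *
 *	bdev = bd_acquire(inode);		// take a reference
 *	err = blkdev_get(bdev, FMODE_READ, NULL);
 *	if (err)
 *		return err;			// @bdev was already put
 */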
1546
Tejun Heod4d77622010-11-13 11:55:18 +01001547/**
1548 * blkdev_get_by_path - open a block device by name
1549 * @path: path to the block device to open
1550 * @mode: FMODE_* mask
1551 * @holder: exclusive holder identifier
1552 *
1553 * Open the blockdevice described by the device file at @path. @mode
1554 * and @holder are identical to blkdev_get().
1555 *
1556 * On success, the returned block_device has reference count of one.
1557 *
1558 * CONTEXT:
1559 * Might sleep.
1560 *
1561 * RETURNS:
1562 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1563 */
1564struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
1565 void *holder)
1566{
1567 struct block_device *bdev;
1568 int err;
1569
1570 bdev = lookup_bdev(path);
1571 if (IS_ERR(bdev))
1572 return bdev;
1573
1574 err = blkdev_get(bdev, mode, holder);
1575 if (err)
1576 return ERR_PTR(err);
1577
Chuck Ebberte51900f2011-02-16 18:11:53 -05001578 if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
1579 blkdev_put(bdev, mode);
1580 return ERR_PTR(-EACCES);
1581 }
1582
Tejun Heod4d77622010-11-13 11:55:18 +01001583 return bdev;
1584}
1585EXPORT_SYMBOL(blkdev_get_by_path);
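
/*
 * Usage sketch: a typical exclusive open by an in-kernel user such as
 * a stacking driver. The path and holder below are hypothetical.
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/vdb", FMODE_READ | FMODE_WRITE |
 *				  FMODE_EXCL, my_holder);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */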
1586
1587/**
1588 * blkdev_get_by_dev - open a block device by device number
1589 * @dev: device number of block device to open
1590 * @mode: FMODE_* mask
1591 * @holder: exclusive holder identifier
1592 *
1593 * Open the blockdevice described by device number @dev. @mode and
1594 * @holder are identical to blkdev_get().
1595 *
1596 * Use it ONLY if you really do not have anything better - i.e. when
1597 * you are behind a truly sucky interface and all you are given is a
1598 * device number. _Never_ to be used for internal purposes. If you
1599 * ever need it - reconsider your API.
1600 *
1601 * On success, the returned block_device has reference count of one.
1602 *
1603 * CONTEXT:
1604 * Might sleep.
1605 *
1606 * RETURNS:
1607 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1608 */
1609struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
1610{
1611 struct block_device *bdev;
1612 int err;
1613
1614 bdev = bdget(dev);
1615 if (!bdev)
1616 return ERR_PTR(-ENOMEM);
1617
1618 err = blkdev_get(bdev, mode, holder);
1619 if (err)
1620 return ERR_PTR(err);
1621
1622 return bdev;
1623}
1624EXPORT_SYMBOL(blkdev_get_by_dev);
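
/*
 * Usage sketch for the (discouraged) open-by-number path; the device
 * number is illustrative only:
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_dev(MKDEV(8, 0), FMODE_READ, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 */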
1625
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626static int blkdev_open(struct inode * inode, struct file * filp)
1627{
1628 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629
1630 /*
1631 * Preserve backwards compatibility and allow large file access
1632 * even if userspace doesn't ask for it explicitly. Some mkfs
1633 * binaries need it. We might want to drop this workaround
1634 * during a future unstable development cycle.
1635 */
1636 filp->f_flags |= O_LARGEFILE;
1637
Al Viro572c4892007-10-08 13:24:05 -04001638 if (filp->f_flags & O_NDELAY)
1639 filp->f_mode |= FMODE_NDELAY;
1640 if (filp->f_flags & O_EXCL)
1641 filp->f_mode |= FMODE_EXCL;
1642 if ((filp->f_flags & O_ACCMODE) == 3)
1643 filp->f_mode |= FMODE_WRITE_IOCTL;
1644
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 bdev = bd_acquire(inode);
Pavel Emelianov6a2aae02006-10-28 10:38:33 -07001646 if (bdev == NULL)
1647 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
Al Viro572c4892007-10-08 13:24:05 -04001649 filp->f_mapping = bdev->bd_inode->i_mapping;
1650
Tejun Heoe525fd82010-11-13 11:55:17 +01001651 return blkdev_get(bdev, filp->f_mode, filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652}
1653
Al Viro4385bab2013-05-05 22:11:03 -04001654static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001655{
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001656 struct gendisk *disk = bdev->bd_disk;
NeilBrown37be4122006-12-08 02:36:16 -08001657 struct block_device *victim = NULL;
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001658
NeilBrown6796bf52006-12-08 02:36:16 -08001659 mutex_lock_nested(&bdev->bd_mutex, for_part);
NeilBrown37be4122006-12-08 02:36:16 -08001660 if (for_part)
1661 bdev->bd_part_count--;
1662
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001663 if (!--bdev->bd_openers) {
Tejun Heo6a027ef2010-11-13 11:55:17 +01001664 WARN_ON_ONCE(bdev->bd_holders);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001665 sync_blockdev(bdev);
1666 kill_bdev(bdev);
Ilya Dryomov43d1c0e2015-11-20 22:22:34 +01001667
Vivek Goyaldbd3ca52015-11-09 09:23:40 -07001668 bdev_write_inode(bdev);
Ilya Dryomov43d1c0e2015-11-20 22:22:34 +01001669 /*
1670 * Detaching bdev inode from its wb in __destroy_inode()
1671 * is too late: the queue which embeds its bdi (along with
1672 * root wb) can be gone as soon as we put_disk() below.
1673 */
1674 inode_detach_wb(bdev->bd_inode);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001675 }
1676 if (bdev->bd_contains == bdev) {
1677 if (disk->fops->release)
Al Virodb2a1442013-05-05 21:52:57 -04001678 disk->fops->release(disk, mode);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001679 }
1680 if (!bdev->bd_openers) {
1681 struct module *owner = disk->fops->owner;
1682
Tejun Heo0762b8b2008-08-25 19:56:12 +09001683 disk_put_part(bdev->bd_part);
1684 bdev->bd_part = NULL;
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001685 bdev->bd_disk = NULL;
NeilBrown37be4122006-12-08 02:36:16 -08001686 if (bdev != bdev->bd_contains)
1687 victim = bdev->bd_contains;
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001688 bdev->bd_contains = NULL;
Tejun Heo523e1d32011-10-19 14:31:07 +02001689
1690 put_disk(disk);
1691 module_put(owner);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001692 }
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001693 mutex_unlock(&bdev->bd_mutex);
1694 bdput(bdev);
NeilBrown37be4122006-12-08 02:36:16 -08001695 if (victim)
Al Viro9a1c3542008-02-22 20:40:24 -05001696 __blkdev_put(victim, mode, 1);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001697}
1698
Al Viro4385bab2013-05-05 22:11:03 -04001699void blkdev_put(struct block_device *bdev, fmode_t mode)
NeilBrown37be4122006-12-08 02:36:16 -08001700{
Tejun Heo85ef06d2011-07-01 16:17:47 +02001701 mutex_lock(&bdev->bd_mutex);
1702
Tejun Heoe525fd82010-11-13 11:55:17 +01001703 if (mode & FMODE_EXCL) {
Tejun Heo6a027ef2010-11-13 11:55:17 +01001704 bool bdev_free;
1705
1706 /*
1707 * Release a claim on the device. The holder fields
1708 * are protected with bdev_lock. bd_mutex is to
1709 * synchronize disk_holder unlinking.
1710 */
Tejun Heo6a027ef2010-11-13 11:55:17 +01001711 spin_lock(&bdev_lock);
1712
1713 WARN_ON_ONCE(--bdev->bd_holders < 0);
1714 WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
1715
1716 /* bd_contains might point to self, check in a separate step */
1717 if ((bdev_free = !bdev->bd_holders))
1718 bdev->bd_holder = NULL;
1719 if (!bdev->bd_contains->bd_holders)
1720 bdev->bd_contains->bd_holder = NULL;
1721
1722 spin_unlock(&bdev_lock);
1723
Tejun Heo77ea8872010-12-08 20:57:37 +01001724 /*
1725 * If this was the last claim, remove holder link and
1726 * unblock event polling if it was a write holder.
1727 */
Tejun Heo85ef06d2011-07-01 16:17:47 +02001728 if (bdev_free && bdev->bd_write_holder) {
1729 disk_unblock_events(bdev->bd_disk);
1730 bdev->bd_write_holder = false;
Tejun Heo77ea8872010-12-08 20:57:37 +01001731 }
Tejun Heo69362172011-03-09 19:54:27 +01001732 }
Tejun Heo77ea8872010-12-08 20:57:37 +01001733
Tejun Heo85ef06d2011-07-01 16:17:47 +02001734 /*
1735 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
1736 * event. This is to ensure detection of media removal commanded
1737 * from userland - e.g. eject(1).
1738 */
1739 disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
1740
1741 mutex_unlock(&bdev->bd_mutex);
1742
Al Viro4385bab2013-05-05 22:11:03 -04001743 __blkdev_put(bdev, mode, 0);
NeilBrown37be4122006-12-08 02:36:16 -08001744}
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001745EXPORT_SYMBOL(blkdev_put);
1746
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747static int blkdev_close(struct inode * inode, struct file * filp)
1748{
Dan Williams4ebb16c2015-10-28 07:48:19 +09001749 struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
Al Viro4385bab2013-05-05 22:11:03 -04001750 blkdev_put(bdev, filp->f_mode);
1751 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752}
1753
Arnd Bergmannbb93e3a2005-06-23 00:10:15 -07001754static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755{
Dan Williams4ebb16c2015-10-28 07:48:19 +09001756 struct block_device *bdev = I_BDEV(bdev_file_inode(file));
Al Viro56b26ad2008-09-19 03:17:36 -04001757 fmode_t mode = file->f_mode;
Christoph Hellwigfd4ce1a2008-11-05 14:58:42 +01001758
1759 /*
1760 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
1761 * to update it before every ioctl.
1762 */
Al Viro56b26ad2008-09-19 03:17:36 -04001763 if (file->f_flags & O_NDELAY)
Christoph Hellwigfd4ce1a2008-11-05 14:58:42 +01001764 mode |= FMODE_NDELAY;
1765 else
1766 mode &= ~FMODE_NDELAY;
1767
Al Viro56b26ad2008-09-19 03:17:36 -04001768 return blkdev_ioctl(bdev, mode, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769}
1770
Theodore Ts'o87d8fe12009-01-03 09:47:09 -05001771/*
Christoph Hellwigeef99382009-08-20 17:43:41 +02001772 * Write data to the block device. Only intended for the block device itself
1773 * and the raw driver which basically is a fake block device.
1774 *
1775 * Does not take i_mutex for the write and thus is not for general purpose
1776 * use.
1777 */
Al Viro1456c0a2014-04-03 03:21:50 -04001778ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
Christoph Hellwigeef99382009-08-20 17:43:41 +02001779{
1780 struct file *file = iocb->ki_filp;
Dan Williams4ebb16c2015-10-28 07:48:19 +09001781 struct inode *bd_inode = bdev_file_inode(file);
Al Viro7ec7b942015-04-07 11:35:14 -04001782 loff_t size = i_size_read(bd_inode);
Jianpeng Ma53362a02012-08-02 09:50:39 +02001783 struct blk_plug plug;
Christoph Hellwigeef99382009-08-20 17:43:41 +02001784 ssize_t ret;
Al Viro5f380c72015-04-07 11:28:12 -04001785
Al Viro7ec7b942015-04-07 11:35:14 -04001786 if (bdev_read_only(I_BDEV(bd_inode)))
1787 return -EPERM;
Al Viro5f380c72015-04-07 11:28:12 -04001788
Al Viro7ec7b942015-04-07 11:35:14 -04001789 if (!iov_iter_count(from))
Al Viro5f380c72015-04-07 11:28:12 -04001790 return 0;
1791
Al Viro7ec7b942015-04-07 11:35:14 -04001792 if (iocb->ki_pos >= size)
1793 return -ENOSPC;
1794
1795 iov_iter_truncate(from, size - iocb->ki_pos);
Christoph Hellwigeef99382009-08-20 17:43:41 +02001796
Jianpeng Ma53362a02012-08-02 09:50:39 +02001797 blk_start_plug(&plug);
Al Viro1456c0a2014-04-03 03:21:50 -04001798 ret = __generic_file_write_iter(iocb, from);
Christoph Hellwige2592212016-04-07 08:52:01 -07001799 if (ret > 0)
1800 ret = generic_write_sync(iocb, ret);
Jianpeng Ma53362a02012-08-02 09:50:39 +02001801 blk_finish_plug(&plug);
Christoph Hellwigeef99382009-08-20 17:43:41 +02001802 return ret;
1803}
Al Viro1456c0a2014-04-03 03:21:50 -04001804EXPORT_SYMBOL_GPL(blkdev_write_iter);
Christoph Hellwigeef99382009-08-20 17:43:41 +02001805
David Jefferyb2de5252014-09-29 10:21:10 -04001806ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001807{
1808 struct file *file = iocb->ki_filp;
Dan Williams4ebb16c2015-10-28 07:48:19 +09001809 struct inode *bd_inode = bdev_file_inode(file);
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001810 loff_t size = i_size_read(bd_inode);
Al Viroa8860382014-04-02 20:02:21 -04001811 loff_t pos = iocb->ki_pos;
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001812
1813 if (pos >= size)
1814 return 0;
1815
1816 size -= pos;
Al Viroa8860382014-04-02 20:02:21 -04001817 iov_iter_truncate(to, size);
1818 return generic_file_read_iter(iocb, to);
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001819}
David Jefferyb2de5252014-09-29 10:21:10 -04001820EXPORT_SYMBOL_GPL(blkdev_read_iter);
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001821
Christoph Hellwigeef99382009-08-20 17:43:41 +02001822/*
Theodore Ts'o87d8fe12009-01-03 09:47:09 -05001823 * Try to release a page associated with block device when the system
1824 * is under memory pressure.
1825 */
1826static int blkdev_releasepage(struct page *page, gfp_t wait)
1827{
1828 struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
1829
1830 if (super && super->s_op->bdev_try_to_free_page)
1831 return super->s_op->bdev_try_to_free_page(super, page, wait);
1832
1833 return try_to_free_buffers(page);
1834}
1835
Ross Zwisler7f6d5b52016-02-26 15:19:55 -08001836static int blkdev_writepages(struct address_space *mapping,
1837 struct writeback_control *wbc)
1838{
1839 if (dax_mapping(mapping)) {
1840 struct block_device *bdev = I_BDEV(mapping->host);
1841
1842 return dax_writeback_mapping_range(mapping, bdev, wbc);
1843 }
1844 return generic_writepages(mapping, wbc);
1845}
1846
Adrian Bunk4c54ac62008-02-18 13:48:31 +01001847static const struct address_space_operations def_blk_aops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 .readpage = blkdev_readpage,
Akinobu Mita447f05b2014-10-09 15:26:58 -07001849 .readpages = blkdev_readpages,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 .writepage = blkdev_writepage,
Nick Piggin6272b5a2007-10-16 01:25:04 -07001851 .write_begin = blkdev_write_begin,
1852 .write_end = blkdev_write_end,
Ross Zwisler7f6d5b52016-02-26 15:19:55 -08001853 .writepages = blkdev_writepages,
Theodore Ts'o87d8fe12009-01-03 09:47:09 -05001854 .releasepage = blkdev_releasepage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 .direct_IO = blkdev_direct_IO,
Mel Gormanb4597222013-07-03 15:02:05 -07001856 .is_dirty_writeback = buffer_check_dirty_writeback,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857};
1858
Darrick J. Wong25f4c412016-10-11 13:51:11 -07001859#define BLKDEV_FALLOC_FL_SUPPORTED \
1860 (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
1861 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
1862
1863static long blkdev_fallocate(struct file *file, int mode, loff_t start,
1864 loff_t len)
1865{
1866 struct block_device *bdev = I_BDEV(bdev_file_inode(file));
1867 struct request_queue *q = bdev_get_queue(bdev);
1868 struct address_space *mapping;
1869 loff_t end = start + len - 1;
1870 loff_t isize;
1871 int error;
1872
1873 /* Fail if we don't recognize the flags. */
1874 if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
1875 return -EOPNOTSUPP;
1876
1877 /* Don't go off the end of the device. */
1878 isize = i_size_read(bdev->bd_inode);
1879 if (start >= isize)
1880 return -EINVAL;
1881 if (end >= isize) {
1882 if (mode & FALLOC_FL_KEEP_SIZE) {
1883 len = isize - start;
1884 end = start + len - 1;
1885 } else
1886 return -EINVAL;
1887 }
1888
1889 /*
1890 * Don't allow IO that isn't aligned to logical block size.
1891 */
1892 if ((start | len) & (bdev_logical_block_size(bdev) - 1))
1893 return -EINVAL;
1894
1895 /* Invalidate the page cache, including dirty pages. */
1896 mapping = bdev->bd_inode->i_mapping;
1897 truncate_inode_pages_range(mapping, start, end);
1898
1899 switch (mode) {
1900 case FALLOC_FL_ZERO_RANGE:
1901 case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
1902 error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
1903 GFP_KERNEL, false);
1904 break;
1905 case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
1906 /* Only punch if the device can do zeroing discard. */
1907 if (!blk_queue_discard(q) || !q->limits.discard_zeroes_data)
1908 return -EOPNOTSUPP;
1909 error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
1910 GFP_KERNEL, 0);
1911 break;
1912 case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
1913 if (!blk_queue_discard(q))
1914 return -EOPNOTSUPP;
1915 error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
1916 GFP_KERNEL, 0);
1917 break;
1918 default:
1919 return -EOPNOTSUPP;
1920 }
1921 if (error)
1922 return error;
1923
1924 /*
1925 * Invalidate again; if someone wandered in and dirtied a page,
1926 * the caller will be given -EBUSY. The third argument is
1927 * inclusive, so the rounding here is safe.
1928 */
1929 return invalidate_inode_pages2_range(mapping,
1930 start >> PAGE_SHIFT,
1931 end >> PAGE_SHIFT);
1932}
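
/*
 * Userspace-facing sketch of the modes handled above, against a
 * hypothetical device node the caller may write to:
 *
 *	int fd = open("/dev/vdb", O_RDWR);
 *
 *	// Zero a 1 MiB range:
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
 *		  0, 1 << 20);
 *
 *	// Discard it instead; fails with EOPNOTSUPP unless the queue
 *	// supports discard that zeroes data:
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 1 << 20);
 */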
1933
Arjan van de Ven4b6f5d22006-03-28 01:56:42 -08001934const struct file_operations def_blk_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 .open = blkdev_open,
1936 .release = blkdev_close,
1937 .llseek = block_llseek,
Al Viroa8860382014-04-02 20:02:21 -04001938 .read_iter = blkdev_read_iter,
Al Viro1456c0a2014-04-03 03:21:50 -04001939 .write_iter = blkdev_write_iter,
Dan Williamsacc93d32016-05-07 11:40:28 -07001940 .mmap = generic_file_mmap,
Andrew Mortonb1dd3b22010-04-06 14:35:00 -07001941 .fsync = blkdev_fsync,
Arnd Bergmannbb93e3a2005-06-23 00:10:15 -07001942 .unlocked_ioctl = block_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943#ifdef CONFIG_COMPAT
1944 .compat_ioctl = compat_blkdev_ioctl,
1945#endif
Linus Torvalds1e8b3332012-11-29 10:49:50 -08001946 .splice_read = generic_file_splice_read,
Al Viro8d020762014-04-05 04:27:08 -04001947 .splice_write = iter_file_splice_write,
Darrick J. Wong25f4c412016-10-11 13:51:11 -07001948 .fallocate = blkdev_fallocate,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949};
1950
1951int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
1952{
1953 int res;
1954 mm_segment_t old_fs = get_fs();
1955 set_fs(KERNEL_DS);
Al Viro56b26ad2008-09-19 03:17:36 -04001956 res = blkdev_ioctl(bdev, 0, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 set_fs(old_fs);
1958 return res;
1959}
1960
1961EXPORT_SYMBOL(ioctl_by_bdev);
1962
1963/**
1964 * lookup_bdev - lookup a struct block_device by name
Randy Dunlap94e29592009-01-06 14:41:15 -08001965 * @pathname: special file representing the block device
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 *
Randy Dunlap57d1b532008-10-09 10:42:38 +02001967 * Get a reference to the blockdevice at @pathname in the current
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 * namespace if possible and return it. Return ERR_PTR(error)
1969 * otherwise.
1970 */
Al Viro421748e2008-08-02 01:04:36 -04001971struct block_device *lookup_bdev(const char *pathname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972{
1973 struct block_device *bdev;
1974 struct inode *inode;
Al Viro421748e2008-08-02 01:04:36 -04001975 struct path path;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 int error;
1977
Al Viro421748e2008-08-02 01:04:36 -04001978 if (!pathname || !*pathname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 return ERR_PTR(-EINVAL);
1980
Al Viro421748e2008-08-02 01:04:36 -04001981 error = kern_path(pathname, LOOKUP_FOLLOW, &path);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 if (error)
1983 return ERR_PTR(error);
1984
David Howellsbb6687342015-03-17 22:26:21 +00001985 inode = d_backing_inode(path.dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 error = -ENOTBLK;
1987 if (!S_ISBLK(inode->i_mode))
1988 goto fail;
1989 error = -EACCES;
Eric W. Biedermana2982cc2016-06-09 15:34:02 -05001990 if (!may_open_dev(&path))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 goto fail;
1992 error = -ENOMEM;
1993 bdev = bd_acquire(inode);
1994 if (!bdev)
1995 goto fail;
1996out:
Al Viro421748e2008-08-02 01:04:36 -04001997 path_put(&path);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 return bdev;
1999fail:
2000 bdev = ERR_PTR(error);
2001 goto out;
2002}
Al Virod5686b42008-08-01 05:00:11 -04002003EXPORT_SYMBOL(lookup_bdev);
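
/*
 * Usage sketch (the path is illustrative): lookup_bdev() returns a
 * referenced but unopened block_device, so the caller pairs it with
 * bdput() rather than blkdev_put():
 *
 *	struct block_device *bdev = lookup_bdev("/dev/loop0");
 *
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	bdput(bdev);
 */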
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
NeilBrown93b270f2011-02-24 17:25:47 +11002005int __invalidate_device(struct block_device *bdev, bool kill_dirty)
David Howellsb71e8a42006-08-29 19:06:11 +01002006{
2007 struct super_block *sb = get_super(bdev);
2008 int res = 0;
2009
2010 if (sb) {
2011 /*
2012 * no need to lock the super, get_super holds the
2013 * read mutex so the filesystem cannot go away
2014 * under us (->put_super runs with the write lock
2015 * held).
2016 */
2017 shrink_dcache_sb(sb);
NeilBrown93b270f2011-02-24 17:25:47 +11002018 res = invalidate_inodes(sb, kill_dirty);
David Howellsb71e8a42006-08-29 19:06:11 +01002019 drop_super(sb);
2020 }
Peter Zijlstraf98393a2007-05-06 14:49:54 -07002021 invalidate_bdev(bdev);
David Howellsb71e8a42006-08-29 19:06:11 +01002022 return res;
2023}
2024EXPORT_SYMBOL(__invalidate_device);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002025
2026void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
2027{
2028 struct inode *inode, *old_inode = NULL;
2029
Dave Chinner74278da2015-03-04 12:37:22 -05002030 spin_lock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002031 list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
2032 struct address_space *mapping = inode->i_mapping;
2033
2034 spin_lock(&inode->i_lock);
2035 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
2036 mapping->nrpages == 0) {
2037 spin_unlock(&inode->i_lock);
2038 continue;
2039 }
2040 __iget(inode);
2041 spin_unlock(&inode->i_lock);
Dave Chinner74278da2015-03-04 12:37:22 -05002042 spin_unlock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002043 /*
2044 * We hold a reference to 'inode' so it couldn't have been
2045 * removed from s_inodes list while we dropped the
Dave Chinner74278da2015-03-04 12:37:22 -05002046 * s_inode_list_lock. We cannot iput the inode now as we can
Jan Kara5c0d6b62012-07-03 16:45:31 +02002047 * be holding the last reference and we cannot iput it under
Dave Chinner74278da2015-03-04 12:37:22 -05002048 * s_inode_list_lock. So we keep the reference and iput it
Jan Kara5c0d6b62012-07-03 16:45:31 +02002049 * later.
2050 */
2051 iput(old_inode);
2052 old_inode = inode;
2053
2054 func(I_BDEV(inode), arg);
2055
Dave Chinner74278da2015-03-04 12:37:22 -05002056 spin_lock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002057 }
Dave Chinner74278da2015-03-04 12:37:22 -05002058 spin_unlock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002059 iput(old_inode);
2060}
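
/*
 * Caller sketch: flush every open block device, in the style of
 * sys_sync(); the callback name is hypothetical.
 *
 *	static void sync_one_bdev(struct block_device *bdev, void *arg)
 *	{
 *		sync_blockdev(bdev);
 *	}
 *	...
 *	iterate_bdevs(sync_one_bdev, NULL);
 */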