/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);

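/*
 * Size of the device in units of its current soft block size; callers use
 * this as the exclusive upper bound for valid block numbers.
 */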
static sector_t max_block(struct block_device *bdev)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = i_size_read(bdev->bd_inode);

	if (sz) {
		unsigned int size = block_size(bdev);
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	if (bdev->bd_inode->i_mapping->nrpages == 0)
		return;
	invalidate_bh_lrus();
	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}

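/*
 * Set the soft block size used for buffered I/O on this device.  Any page
 * cache and buffers built with the old block size are synced and then
 * thrown away.
 */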
int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_hardsect_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (bdev->bd_block_size != size) {
		sync_blockdev(bdev);
		bdev->bd_block_size = size;
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_hardsect_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);

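/*
 * get_block callback for buffered I/O on a block device: the mapping from
 * file block to device block is simply 1:1, so only the end-of-device
 * check is interesting.
 */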
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	if (iblock >= max_block(I_BDEV(inode))) {
		if (create)
			return -EIO;

		/*
		 * for reads, we're just trying to fill a partial page.
		 * return a hole, they will have to call get_block again
		 * before they can fill it, and they will get -EIO at that
		 * time
		 */
		return 0;
	}
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	sector_t end_block = max_block(I_BDEV(inode));
	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;

	if ((iblock + max_blocks) > end_block) {
		max_blocks = end_block - iblock;
		if ((long)max_blocks <= 0) {
			if (create)
				return -EIO;	/* write fully beyond EOF */
			/*
			 * It is a read which is fully beyond EOF.  We return
			 * a !buffer_mapped buffer
			 */
			max_blocks = 0;
		}
	}

	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	bh->b_size = max_blocks << inode->i_blkbits;
	if (max_blocks)
		set_buffer_mapped(bh);
	return 0;
}

static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
				iov, offset, nr_segs, blkdev_get_blocks, NULL);
}

#if 0
static int blk_end_aio(struct bio *bio, unsigned int bytes_done, int error)
{
	struct kiocb *iocb = bio->bi_private;
	atomic_t *bio_count = &iocb->ki_bio_count;

	if (bio_data_dir(bio) == READ)
		bio_check_pages_dirty(bio);
	else {
		bio_release_pages(bio);
		bio_put(bio);
	}

	/* iocb->ki_nbytes stores error code from LLDD */
	if (error)
		iocb->ki_nbytes = -EIO;

	if (atomic_dec_and_test(bio_count)) {
		if ((long)iocb->ki_nbytes < 0)
			aio_complete(iocb, iocb->ki_nbytes, 0);
		else
			aio_complete(iocb, iocb->ki_left, 0);
	}

	return 0;
}

#define VEC_SIZE	16
struct pvec {
	unsigned short nr;
	unsigned short idx;
	struct page *page[VEC_SIZE];
};

#define PAGES_SPANNED(addr, len)	\
	(DIV_ROUND_UP((addr) + (len), PAGE_SIZE) - (addr) / PAGE_SIZE);

/*
 * get page pointer for user addr, we internally cache struct page array for
 * (addr, count) range in pvec to avoid frequent call to get_user_pages.  If
 * internal page list is exhausted, a batch count of up to VEC_SIZE is used
 * to get next set of page struct.
 */
static struct page *blk_get_page(unsigned long addr, size_t count, int rw,
				struct pvec *pvec)
{
	int ret, nr_pages;
	if (pvec->idx == pvec->nr) {
		nr_pages = PAGES_SPANNED(addr, count);
		nr_pages = min(nr_pages, VEC_SIZE);
		down_read(&current->mm->mmap_sem);
		ret = get_user_pages(current, current->mm, addr, nr_pages,
				     rw == READ, 0, pvec->page, NULL);
		up_read(&current->mm->mmap_sem);
		if (ret < 0)
			return ERR_PTR(ret);
		pvec->nr = ret;
		pvec->idx = 0;
	}
	return pvec->page[pvec->idx++];
}

/* return a page back to pvec array */
static void blk_unget_page(struct page *page, struct pvec *pvec)
{
	pvec->page[--pvec->idx] = page;
}

static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		 loff_t pos, unsigned long nr_segs)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	unsigned blkbits = blksize_bits(bdev_hardsect_size(I_BDEV(inode)));
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long seg = 0;	/* iov segment iterator */
	unsigned long nvec;	/* number of bio vec needed */
	unsigned long cur_off;	/* offset into current page */
	unsigned long cur_len;	/* I/O len of current page, up to PAGE_SIZE */

	unsigned long addr;	/* user iovec address */
	size_t count;		/* user iovec len */
	size_t nbytes = iocb->ki_nbytes = iocb->ki_left;	/* total xfer size */
	loff_t size;		/* size of block device */
	struct bio *bio;
	atomic_t *bio_count = &iocb->ki_bio_count;
	struct page *page;
	struct pvec pvec;

	pvec.nr = 0;
	pvec.idx = 0;

	if (pos & blocksize_mask)
		return -EINVAL;

	size = i_size_read(inode);
	if (pos + nbytes > size) {
		nbytes = size - pos;
		iocb->ki_left = nbytes;
	}

	/*
	 * check first non-zero iov alignment, the remaining
	 * iov alignment is checked inside bio loop below.
	 */
	do {
		addr = (unsigned long) iov[seg].iov_base;
		count = min(iov[seg].iov_len, nbytes);
		if (addr & blocksize_mask || count & blocksize_mask)
			return -EINVAL;
	} while (!count && ++seg < nr_segs);
	atomic_set(bio_count, 1);

	while (nbytes) {
		/* roughly estimate number of bio vec needed */
		nvec = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
		nvec = max(nvec, nr_segs - seg);
		nvec = min(nvec, (unsigned long) BIO_MAX_PAGES);

		/* bio_alloc should not fail with GFP_KERNEL flag */
		bio = bio_alloc(GFP_KERNEL, nvec);
		bio->bi_bdev = I_BDEV(inode);
		bio->bi_end_io = blk_end_aio;
		bio->bi_private = iocb;
		bio->bi_sector = pos >> blkbits;
same_bio:
		cur_off = addr & ~PAGE_MASK;
		cur_len = PAGE_SIZE - cur_off;
		if (count < cur_len)
			cur_len = count;

		page = blk_get_page(addr, count, rw, &pvec);
		if (unlikely(IS_ERR(page)))
			goto backout;

		if (bio_add_page(bio, page, cur_len, cur_off)) {
			pos += cur_len;
			addr += cur_len;
			count -= cur_len;
			nbytes -= cur_len;

			if (count)
				goto same_bio;
			while (++seg < nr_segs) {
				addr = (unsigned long) iov[seg].iov_base;
				count = iov[seg].iov_len;
				if (!count)
					continue;
				if (unlikely(addr & blocksize_mask ||
					     count & blocksize_mask)) {
					page = ERR_PTR(-EINVAL);
					goto backout;
				}
				count = min(count, nbytes);
				goto same_bio;
			}
		} else {
			blk_unget_page(page, &pvec);
		}

		/* bio is ready, submit it */
		if (rw == READ)
			bio_set_pages_dirty(bio);
		atomic_inc(bio_count);
		submit_bio(rw, bio);
	}

completion:
	iocb->ki_left -= nbytes;
	nbytes = iocb->ki_left;
	iocb->ki_pos += nbytes;

	blk_run_address_space(inode->i_mapping);
	if (atomic_dec_and_test(bio_count))
		aio_complete(iocb, nbytes, 0);

	return -EIOCBQUEUED;

backout:
	/*
	 * back out nbytes count constructed so far for this bio,
	 * we will throw away current bio.
	 */
	nbytes += bio->bi_size;
	bio_release_pages(bio);
	bio_put(bio);

	/*
	 * if no bio was submitted, return the error code.
	 * otherwise, proceed with pending I/O completion.
	 */
	if (atomic_read(bio_count) == 1)
		return PTR_ERR(page);
	goto completion;
}
#endif

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, blkdev_get_block);
}

static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_commit_write(page, from, to);
}

/*
 * private llseek:
 * for a block special file file->f_path.dentry->d_inode->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *bd_inode = file->f_mapping->host;
	loff_t size;
	loff_t retval;

	mutex_lock(&bd_inode->i_mutex);
	size = i_size_read(bd_inode);

	switch (origin) {
		case 2:
			offset += size;
			break;
		case 1:
			offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= size) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
		}
		retval = offset;
	}
	mutex_unlock(&bd_inode->i_mutex);
	return retval;
}

/*
 *	Filp is never NULL; the only case when ->fsync() is called with
 *	NULL first argument is nfsd_sync_dir() and that's not a directory.
 */

static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	return sync_blockdev(I_BDEV(filp->f_mapping->host));
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_destroy_inode(struct inode *inode)
{
	struct bdev_inode *bdi = BDEV_I(inode);

	bdi->bdev.bd_inode_backing_dev_info = NULL;
	kmem_cache_free(bdev_cachep, bdi);
}

static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
	{
		memset(bdev, 0, sizeof(*bdev));
		mutex_init(&bdev->bd_mutex);
		sema_init(&bdev->bd_mount_sem, 1);
		INIT_LIST_HEAD(&bdev->bd_inodes);
		INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
		INIT_LIST_HEAD(&bdev->bd_holder_list);
#endif
		inode_init_once(&ei->vfs_inode);
	}
}

static inline void __bd_forget(struct inode *inode)
{
	list_del_init(&inode->i_devices);
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
}

static void bdev_clear_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	struct list_head *p;
	spin_lock(&bdev_lock);
	while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
		__bd_forget(list_entry(p, struct inode, i_devices));
	}
	list_del_init(&bdev->bd_list);
	spin_unlock(&bdev_lock);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.clear_inode = bdev_clear_inode,
};

static int bd_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.get_sb		= bd_get_sb,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *bd_mnt __read_mostly;
struct super_block *blockdev_superblock;

void __init bdev_cache_init(void)
{
	int err;
	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			init_once, NULL);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	err = PTR_ERR(bd_mnt);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody will need really large one.
 * Keep in mind that it will be fed through icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static LIST_HEAD(all_bdevs);

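/*
 * Look up (creating it if necessary) the block_device for a device number.
 * Each block_device is allocated together with an inode on the bdev
 * pseudo-filesystem (see struct bdev_inode above).
 */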
struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(bd_mnt->mnt_sb, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_inode = inode;
		bdev->bd_block_size = (1 << inode->i_blkbits);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		inode->i_data.backing_dev_info = &default_backing_dev_info;
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

EXPORT_SYMBOL(bdget);

long nr_blockdev_pages(void)
{
	struct list_head *p;
	long ret = 0;
	spin_lock(&bdev_lock);
	list_for_each(p, &all_bdevs) {
		struct block_device *bdev;
		bdev = list_entry(p, struct block_device, bd_list);
		ret += bdev->bd_inode->i_mapping->nrpages;
	}
	spin_unlock(&bdev_lock);
	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

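/*
 * Find the block_device behind a device special inode, taking a reference
 * and caching the association in inode->i_bdev and inode->i_mapping.
 */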
static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev) {
		atomic_inc(&bdev->bd_inode->i_count);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional bd_inode->i_count for inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			atomic_inc(&bdev->bd_inode->i_count);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
			list_add(&inode->i_devices, &bdev->bd_inodes);
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

/* Call when you free inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (inode->i_bdev) {
		if (inode->i_sb != blockdev_superblock)
			bdev = inode->i_bdev;
		__bd_forget(inode);
	}
	spin_unlock(&bdev_lock);

	if (bdev)
		iput(bdev->bd_inode);
}

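/*
 * Claim the device for @holder.  Fails with -EBUSY if the device (or, for
 * a partition, its whole disk) is already held by another holder.
 */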
int bd_claim(struct block_device *bdev, void *holder)
{
	int res;
	spin_lock(&bdev_lock);

	/* first decide result */
	if (bdev->bd_holder == holder)
		res = 0;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		res = -EBUSY; 	 /* held by someone else */
	else if (bdev->bd_contains == bdev)
		res = 0;  	 /* is a whole device which isn't held */

	else if (bdev->bd_contains->bd_holder == bd_claim)
		res = 0; 	 /* is a partition of a device that is being partitioned */
	else if (bdev->bd_contains->bd_holder != NULL)
		res = -EBUSY;	 /* is a partition of a held device */
	else
		res = 0;	 /* is a partition of an un-held device */

	/* now impose change */
	if (res==0) {
		/* note that for a whole device bd_holders
		 * will be incremented twice, and bd_holder will
		 * be set to bd_claim before being set to holder
		 */
		bdev->bd_contains->bd_holders ++;
		bdev->bd_contains->bd_holder = bd_claim;
		bdev->bd_holders++;
		bdev->bd_holder = holder;
	}
	spin_unlock(&bdev_lock);
	return res;
}

EXPORT_SYMBOL(bd_claim);

void bd_release(struct block_device *bdev)
{
	spin_lock(&bdev_lock);
	if (!--bdev->bd_contains->bd_holders)
		bdev->bd_contains->bd_holder = NULL;
	if (!--bdev->bd_holders)
		bdev->bd_holder = NULL;
	spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);

#ifdef CONFIG_SYSFS
/*
 * Functions for bd_claim_by_kobject / bd_release_from_kobject
 *
 *     If a kobject is passed to bd_claim_by_kobject()
 *     and the kobject has a parent directory,
 *     following symlinks are created:
 *        o from the kobject to the claimed bdev
 *        o from "holders" directory of the bdev to the parent of the kobject
 *     bd_release_from_kobject() removes these symlinks.
 *
 *     Example:
 *        If /dev/dm-0 maps to /dev/sda, kobject corresponding to
 *        /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
 *           /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *           /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 */

static struct kobject *bdev_get_kobj(struct block_device *bdev)
{
	if (bdev->bd_contains != bdev)
		return kobject_get(&bdev->bd_part->kobj);
	else
		return kobject_get(&bdev->bd_disk->kobj);
}

static struct kobject *bdev_get_holder(struct block_device *bdev)
{
	if (bdev->bd_contains != bdev)
		return kobject_get(bdev->bd_part->holder_dir);
	else
		return kobject_get(bdev->bd_disk->holder_dir);
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	if (!from || !to)
		return 0;
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	if (!from || !to)
		return;
	sysfs_remove_link(from, kobject_name(to));
}

/*
 * 'struct bd_holder' contains pointers to kobjects symlinked by
 * bd_claim_by_kobject.
 * It's connected to bd_holder_list which is protected by bdev->bd_sem.
 */
struct bd_holder {
	struct list_head list;	/* chain of holders of the bdev */
	int count;		/* references from the holder */
	struct kobject *sdir;	/* holder object, e.g. "/block/dm-0/slaves" */
	struct kobject *hdev;	/* e.g. "/block/dm-0" */
	struct kobject *hdir;	/* e.g. "/block/sda/holders" */
	struct kobject *sdev;	/* e.g. "/block/sda" */
};

/*
 * Get references of related kobjects at once.
 * Returns 1 on success. 0 on failure.
 *
 * Should call bd_holder_release_dirs() after successful use.
 */
static int bd_holder_grab_dirs(struct block_device *bdev,
			struct bd_holder *bo)
{
	if (!bdev || !bo)
		return 0;

	bo->sdir = kobject_get(bo->sdir);
	if (!bo->sdir)
		return 0;

	bo->hdev = kobject_get(bo->sdir->parent);
	if (!bo->hdev)
		goto fail_put_sdir;

	bo->sdev = bdev_get_kobj(bdev);
	if (!bo->sdev)
		goto fail_put_hdev;

	bo->hdir = bdev_get_holder(bdev);
	if (!bo->hdir)
		goto fail_put_sdev;

	return 1;

fail_put_sdev:
	kobject_put(bo->sdev);
fail_put_hdev:
	kobject_put(bo->hdev);
fail_put_sdir:
	kobject_put(bo->sdir);

	return 0;
}

/* Put references of related kobjects at once. */
static void bd_holder_release_dirs(struct bd_holder *bo)
{
	kobject_put(bo->hdir);
	kobject_put(bo->sdev);
	kobject_put(bo->hdev);
	kobject_put(bo->sdir);
}

static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
{
	struct bd_holder *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	bo->count = 1;
	bo->sdir = kobj;

	return bo;
}

static void free_bd_holder(struct bd_holder *bo)
{
	kfree(bo);
}

/**
 * find_bd_holder - find matching struct bd_holder from the block device
 *
 * @bdev:	struct block device to be searched
 * @bo:		target struct bd_holder
 *
 * Returns matching entry with @bo in @bdev->bd_holder_list.
 * If found, increment the reference count and return the pointer.
 * If not found, returns NULL.
 */
static struct bd_holder *find_bd_holder(struct block_device *bdev,
					struct bd_holder *bo)
{
	struct bd_holder *tmp;

	list_for_each_entry(tmp, &bdev->bd_holder_list, list)
		if (tmp->sdir == bo->sdir) {
			tmp->count++;
			return tmp;
		}

	return NULL;
}

/**
 * add_bd_holder - create sysfs symlinks for bd_claim() relationship
 *
 * @bdev:	block device to be bd_claimed
 * @bo:		preallocated and initialized by alloc_bd_holder()
 *
 * Add @bo to @bdev->bd_holder_list, create symlinks.
 *
 * Returns 0 if symlinks are created.
 * Returns -ve if something fails.
 */
static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
{
	int ret;

	if (!bo)
		return -EINVAL;

	if (!bd_holder_grab_dirs(bdev, bo))
		return -EBUSY;

	ret = add_symlink(bo->sdir, bo->sdev);
	if (ret == 0) {
		ret = add_symlink(bo->hdir, bo->hdev);
		if (ret)
			del_symlink(bo->sdir, bo->sdev);
	}
	if (ret == 0)
		list_add_tail(&bo->list, &bdev->bd_holder_list);
	return ret;
}

/**
 * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
 *
 * @bdev:	block device to be bd_claimed
 * @kobj:	holder's kobject
 *
 * If there is matching entry with @kobj in @bdev->bd_holder_list
 * and no other bd_claim() from the same kobject,
 * remove the struct bd_holder from the list, delete symlinks for it.
 *
 * Returns a pointer to the struct bd_holder when it's removed from the list
 * and ready to be freed.
 * Returns NULL if matching claim isn't found or there is other bd_claim()
 * by the same kobject.
 */
static struct bd_holder *del_bd_holder(struct block_device *bdev,
					struct kobject *kobj)
{
	struct bd_holder *bo;

	list_for_each_entry(bo, &bdev->bd_holder_list, list) {
		if (bo->sdir == kobj) {
			bo->count--;
			BUG_ON(bo->count < 0);
			if (!bo->count) {
				list_del(&bo->list);
				del_symlink(bo->sdir, bo->sdev);
				del_symlink(bo->hdir, bo->hdev);
				bd_holder_release_dirs(bo);
				return bo;
			}
			break;
		}
	}

	return NULL;
}

/**
 * bd_claim_by_kobject - bd_claim() with additional kobject signature
 *
 * @bdev:	block device to be claimed
 * @holder:	holder's signature
 * @kobj:	holder's kobject
 *
 * Do bd_claim() and if it succeeds, create sysfs symlinks between
 * the bdev and the holder's kobject.
 * Use bd_release_from_kobject() when releasing the claimed bdev.
 *
 * Returns 0 on success. (same as bd_claim())
 * Returns errno on failure.
 */
static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
				struct kobject *kobj)
{
	int res;
	struct bd_holder *bo, *found;

	if (!kobj)
		return -EINVAL;

	bo = alloc_bd_holder(kobj);
	if (!bo)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	res = bd_claim(bdev, holder);
	if (res == 0) {
		found = find_bd_holder(bdev, bo);
		if (found == NULL) {
			res = add_bd_holder(bdev, bo);
			if (res)
				bd_release(bdev);
		}
	}

	if (res || found)
		free_bd_holder(bo);
	mutex_unlock(&bdev->bd_mutex);

	return res;
}

/**
 * bd_release_from_kobject - bd_release() with additional kobject signature
 *
 * @bdev:	block device to be released
 * @kobj:	holder's kobject
 *
 * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
 */
static void bd_release_from_kobject(struct block_device *bdev,
					struct kobject *kobj)
{
	struct bd_holder *bo;

	if (!kobj)
		return;

	mutex_lock(&bdev->bd_mutex);
	bd_release(bdev);
	if ((bo = del_bd_holder(bdev, kobj)))
		free_bd_holder(bo);
	mutex_unlock(&bdev->bd_mutex);
}

/**
 * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
 *
 * @bdev:	block device to be claimed
 * @holder:	holder's signature
 * @disk:	holder's gendisk
 *
 * Call bd_claim_by_kobject() with getting @disk->slave_dir.
 */
int bd_claim_by_disk(struct block_device *bdev, void *holder,
			struct gendisk *disk)
{
	return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
}
EXPORT_SYMBOL_GPL(bd_claim_by_disk);

/**
 * bd_release_from_disk - wrapper function for bd_release_from_kobject()
 *
 * @bdev:	block device to be claimed
 * @disk:	holder's gendisk
 *
 * Call bd_release_from_kobject() and put @disk->slave_dir.
 */
void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
{
	bd_release_from_kobject(bdev, disk->slave_dir);
	kobject_put(disk->slave_dir);
}
EXPORT_SYMBOL_GPL(bd_release_from_disk);
#endif

/*
 * Tries to open block device by device number.  Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
 * truly sucky interface and all you are given is a device number.  _Never_
 * to be used for internal purposes.  If you ever need it - reconsider
 * your API.
 */
struct block_device *open_by_devnum(dev_t dev, unsigned mode)
{
	struct block_device *bdev = bdget(dev);
	int err = -ENOMEM;
	int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
	if (bdev)
		err = blkdev_get(bdev, mode, flags);
	return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);

/*
 * This routine checks whether a removable media has been changed,
 * and invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device_operations * bdops = disk->fops;

	if (!bdops->media_changed)
		return 0;
	if (!bdops->media_changed(bdev->bd_disk))
		return 0;

	if (__invalidate_device(bdev))
		printk("VFS: busy inodes on changed media.\n");

	if (bdops->revalidate_disk)
		bdops->revalidate_disk(bdev->bd_disk);
	if (bdev->bd_disk->minors > 1)
		bdev->bd_invalidated = 1;
	return 1;
}

EXPORT_SYMBOL(check_disk_change);

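/*
 * Record the device capacity and pick the largest power-of-two block size
 * (up to PAGE_CACHE_SIZE) that still divides it evenly.
 */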
void bd_set_size(struct block_device *bdev, loff_t size)
{
	unsigned bsize = bdev_hardsect_size(bdev);

	bdev->bd_inode->i_size = size;
	while (bsize < PAGE_CACHE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_block_size = bsize;
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);

static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags,
			int for_part);
static int __blkdev_put(struct block_device *bdev, int for_part);

/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */

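/*
 * Common open path for blkdev_open() and blkdev_get().  On the first open
 * of a partition the containing whole disk is opened as well and recorded
 * in bdev->bd_contains.
 */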
static int do_open(struct block_device *bdev, struct file *file, int for_part)
{
	struct module *owner = NULL;
	struct gendisk *disk;
	int ret = -ENXIO;
	int part;

	file->f_mapping = bdev->bd_inode->i_mapping;
	lock_kernel();
	disk = get_gendisk(bdev->bd_dev, &part);
	if (!disk) {
		unlock_kernel();
		bdput(bdev);
		return ret;
	}
	owner = disk->fops->owner;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (!bdev->bd_openers) {
		bdev->bd_disk = disk;
		bdev->bd_contains = bdev;
		if (!part) {
			struct backing_dev_info *bdi;
			if (disk->fops->open) {
				ret = disk->fops->open(bdev->bd_inode, file);
				if (ret)
					goto out_first;
			}
			if (!bdev->bd_openers) {
				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
				bdi = blk_get_backing_dev_info(bdev);
				if (bdi == NULL)
					bdi = &default_backing_dev_info;
				bdev->bd_inode->i_data.backing_dev_info = bdi;
			}
			if (bdev->bd_invalidated)
				rescan_partitions(disk, bdev);
		} else {
			struct hd_struct *p;
			struct block_device *whole;
			whole = bdget_disk(disk, 0);
			ret = -ENOMEM;
			if (!whole)
				goto out_first;
			BUG_ON(for_part);
			ret = __blkdev_get(whole, file->f_mode, file->f_flags, 1);
			if (ret)
				goto out_first;
			bdev->bd_contains = whole;
			p = disk->part[part - 1];
			bdev->bd_inode->i_data.backing_dev_info =
			   whole->bd_inode->i_data.backing_dev_info;
			if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
				ret = -ENXIO;
				goto out_first;
			}
			kobject_get(&p->kobj);
			bdev->bd_part = p;
			bd_set_size(bdev, (loff_t) p->nr_sects << 9);
		}
	} else {
		put_disk(disk);
		module_put(owner);
		if (bdev->bd_contains == bdev) {
			if (bdev->bd_disk->fops->open) {
				ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
				if (ret)
					goto out;
			}
			if (bdev->bd_invalidated)
				rescan_partitions(bdev->bd_disk, bdev);
		}
	}
	bdev->bd_openers++;
	if (for_part)
		bdev->bd_part_count++;
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();
	return 0;

out_first:
	bdev->bd_disk = NULL;
	bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
	if (bdev != bdev->bd_contains)
		__blkdev_put(bdev->bd_contains, 1);
	bdev->bd_contains = NULL;
	put_disk(disk);
	module_put(owner);
out:
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();
	if (ret)
		bdput(bdev);
	return ret;
}

static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags,
			int for_part)
{
	/*
	 * This crockload is due to bad choice of ->open() type.
	 * It will go away.
	 * For now, block device ->open() routine must _not_
	 * examine anything in 'inode' argument except ->i_rdev.
	 */
	struct file fake_file = {};
	struct dentry fake_dentry = {};
	fake_file.f_mode = mode;
	fake_file.f_flags = flags;
	fake_file.f_path.dentry = &fake_dentry;
	fake_dentry.d_inode = bdev->bd_inode;

	return do_open(bdev, &fake_file, for_part);
}

int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
{
	return __blkdev_get(bdev, mode, flags, 0);
}
EXPORT_SYMBOL(blkdev_get);

static int blkdev_open(struct inode * inode, struct file * filp)
{
	struct block_device *bdev;
	int res;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binary needs it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	bdev = bd_acquire(inode);
	if (bdev == NULL)
		return -ENOMEM;

	res = do_open(bdev, filp, 0);
	if (res)
		return res;

	if (!(filp->f_flags & O_EXCL) )
		return 0;

	if (!(res = bd_claim(bdev, filp)))
		return 0;

	blkdev_put(bdev);
	return res;
}

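/*
 * Drop one opener reference.  When the last opener goes away the disk and
 * partition references are released, and a partition also releases the
 * containing whole disk.
 */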
static int __blkdev_put(struct block_device *bdev, int for_part)
{
	int ret = 0;
	struct inode *bd_inode = bdev->bd_inode;
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	lock_kernel();
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		sync_blockdev(bdev);
		kill_bdev(bdev);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			ret = disk->fops->release(bd_inode, NULL);
	}
	if (!bdev->bd_openers) {
		struct module *owner = disk->fops->owner;

		put_disk(disk);
		module_put(owner);

		if (bdev->bd_contains != bdev) {
			kobject_put(&bdev->bd_part->kobj);
			bdev->bd_part = NULL;
		}
		bdev->bd_disk = NULL;
		bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;
	}
	unlock_kernel();
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	if (victim)
		__blkdev_put(victim, 1);
	return ret;
}

int blkdev_put(struct block_device *bdev)
{
	return __blkdev_put(bdev, 0);
}
EXPORT_SYMBOL(blkdev_put);

static int blkdev_close(struct inode * inode, struct file * filp)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	if (bdev->bd_holder == filp)
		bd_release(bdev);
	return blkdev_put(bdev);
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
}

const struct address_space_operations def_blk_aops = {
	.readpage	= blkdev_readpage,
	.writepage	= blkdev_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= blkdev_prepare_write,
	.commit_write	= blkdev_commit_write,
	.writepages	= generic_writepages,
	.direct_IO	= blkdev_direct_IO,
};

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write_nolock,
	.mmap		= generic_file_mmap,
	.fsync		= block_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.sendfile	= generic_file_sendfile,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};

int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
	int res;
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	res = blkdev_ioctl(bdev->bd_inode, NULL, cmd, arg);
	set_fs(old_fs);
	return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);

/**
 * lookup_bdev  - lookup a struct block_device by name
 *
 * @path:	special file representing the block device
 *
 * Get a reference to the blockdevice at @path in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *path)
{
	struct block_device *bdev;
	struct inode *inode;
	struct nameidata nd;
	int error;

	if (!path || !*path)
		return ERR_PTR(-EINVAL);

	error = path_lookup(path, LOOKUP_FOLLOW, &nd);
	if (error)
		return ERR_PTR(error);

	inode = nd.dentry->d_inode;
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (nd.mnt->mnt_flags & MNT_NODEV)
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode);
	if (!bdev)
		goto fail;
out:
	path_release(&nd);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}

/**
 * open_bdev_excl  -  open a block device by name and set it up for use
 *
 * @path:	special file representing the block device
 * @flags:	%MS_RDONLY for opening read-only
 * @holder:	owner for exclusion
 *
 * Open the blockdevice described by the special file at @path, claim it
 * for the @holder.
 */
struct block_device *open_bdev_excl(const char *path, int flags, void *holder)
{
	struct block_device *bdev;
	mode_t mode = FMODE_READ;
	int error = 0;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return bdev;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;
	error = blkdev_get(bdev, mode, 0);
	if (error)
		return ERR_PTR(error);
	error = -EACCES;
	if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
		goto blkdev_put;
	error = bd_claim(bdev, holder);
	if (error)
		goto blkdev_put;

	return bdev;

blkdev_put:
	blkdev_put(bdev);
	return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_excl);

/**
 * close_bdev_excl  -  release a blockdevice opened by open_bdev_excl()
 *
 * @bdev:	blockdevice to close
 *
 * This is the counterpart to open_bdev_excl().
 */
void close_bdev_excl(struct block_device *bdev)
{
	bd_release(bdev);
	blkdev_put(bdev);
}

EXPORT_SYMBOL(close_bdev_excl);

int __invalidate_device(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * hold).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);