/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/inotify.h>

/*
 * This is needed for the following functions:
 *  - inode_has_buffers
 *  - invalidate_inode_buffers
 *  - invalidate_bdev
 *
 * FIXME: remove all knowledge of the buffer layer from this file
 */
#include <linux/buffer_head.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

/* #define INODE_PARANOIA 1 */
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

static unsigned int i_hash_mask;
static unsigned int i_hash_shift;

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
static struct hlist_head *inode_hashtable;

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
DEFINE_SPINLOCK(inode_lock);

/*
 * iprune_sem provides exclusion between the kswapd or try_to_free_pages
 * icache shrinking path, and the umount path.  Without this exclusion,
 * by the time prune_icache calls iput for the inode whose pages it has
 * been invalidating, or by the time it calls clear_inode & destroy_inode
 * from its final dispose_list, the struct super_block they refer to
 * (for inode->i_sb->s_op) may already have been freed and reused.
 */
DECLARE_MUTEX(iprune_sem);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static kmem_cache_t *inode_cachep;

static struct inode *alloc_inode(struct super_block *sb)
{
	static struct address_space_operations empty_aops;
	static struct inode_operations empty_iops;
	static struct file_operations empty_fops;
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);

	if (inode) {
		struct address_space * const mapping = &inode->i_data;

		inode->i_sb = sb;
		inode->i_blkbits = sb->s_blocksize_bits;
		inode->i_flags = 0;
		atomic_set(&inode->i_count, 1);
		inode->i_op = &empty_iops;
		inode->i_fop = &empty_fops;
		inode->i_nlink = 1;
		atomic_set(&inode->i_writecount, 0);
		inode->i_size = 0;
		inode->i_blocks = 0;
		inode->i_bytes = 0;
		inode->i_generation = 0;
#ifdef CONFIG_QUOTA
		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
		inode->i_pipe = NULL;
		inode->i_bdev = NULL;
		inode->i_cdev = NULL;
		inode->i_rdev = 0;
		inode->i_security = NULL;
		inode->dirtied_when = 0;
		if (security_inode_alloc(inode)) {
			if (inode->i_sb->s_op->destroy_inode)
				inode->i_sb->s_op->destroy_inode(inode);
			else
				kmem_cache_free(inode_cachep, (inode));
			return NULL;
		}

		mapping->a_ops = &empty_aops;
		mapping->host = inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = &default_backing_dev_info;

		/*
		 * If the block_device provides a backing_dev_info for client
		 * inodes then use that.  Otherwise the inode shares the bdev's
		 * backing_dev_info.
		 */
		if (sb->s_bdev) {
			struct backing_dev_info *bdi;

			bdi = sb->s_bdev->bd_inode_backing_dev_info;
			if (!bdi)
				bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
			mapping->backing_dev_info = bdi;
		}
		memset(&inode->u, 0, sizeof(inode->u));
		inode->i_mapping = mapping;
	}
	return inode;
}

void destroy_inode(struct inode *inode)
{
	if (inode_has_buffers(inode))
		BUG();
	security_inode_free(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, (inode));
}


/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so make the slab aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	sema_init(&inode->i_sem, 1);
	init_rwsem(&inode->i_alloc_sem);
	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
	rwlock_init(&inode->i_data.tree_lock);
	spin_lock_init(&inode->i_data.i_mmap_lock);
	INIT_LIST_HEAD(&inode->i_data.private_list);
	spin_lock_init(&inode->i_data.private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
	spin_lock_init(&inode->i_lock);
	i_size_ordered_init(inode);
#ifdef CONFIG_INOTIFY
	INIT_LIST_HEAD(&inode->inotify_watches);
	sema_init(&inode->inotify_sem, 1);
#endif
}

EXPORT_SYMBOL(inode_init_once);
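
/*
 * Illustrative sketch (not part of this file): a filesystem that embeds
 * struct inode in its own in-memory inode typically calls inode_init_once()
 * from the constructor of its private slab cache, mirroring init_once()
 * below, so the VFS fields are set up exactly once per slab object.
 * "struct myfs_inode_info", its "vfs_inode" member and myfs_init_once()
 * are hypothetical names:
 *
 *	static void myfs_init_once(void *foo, kmem_cache_t *cachep,
 *				   unsigned long flags)
 *	{
 *		struct myfs_inode_info *ei = (struct myfs_inode_info *) foo;
 *
 *		if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 *		    SLAB_CTOR_CONSTRUCTOR)
 *			inode_init_once(&ei->vfs_inode);
 *	}
 */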

static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct inode * inode = (struct inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(inode);
}

/*
 * inode_lock must be held
 */
void __iget(struct inode * inode)
{
	if (atomic_read(&inode->i_count)) {
		atomic_inc(&inode->i_count);
		return;
	}
	atomic_inc(&inode->i_count);
	if (!(inode->i_state & (I_DIRTY|I_LOCK)))
		list_move(&inode->i_list, &inode_in_use);
	inodes_stat.nr_unused--;
}

/**
 * clear_inode - clear an inode
 * @inode: inode to clear
 *
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	might_sleep();
	invalidate_inode_buffers(inode);

	if (inode->i_data.nrpages)
		BUG();
	if (!(inode->i_state & I_FREEING))
		BUG();
	if (inode->i_state & I_CLEAR)
		BUG();
	wait_on_inode(inode);
	DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
	if (inode->i_bdev)
		bd_forget(inode);
	if (inode->i_cdev)
		cd_forget(inode);
	inode->i_state = I_CLEAR;
}

EXPORT_SYMBOL(clear_inode);

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	int nr_disposed = 0;

	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_entry(head->next, struct inode, i_list);
		list_del(&inode->i_list);

		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);

		spin_lock(&inode_lock);
		hlist_del_init(&inode->i_hash);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_lock);

		wake_up_inode(inode);
		destroy_inode(inode);
		nr_disposed++;
	}
	spin_lock(&inode_lock);
	inodes_stat.nr_inodes -= nr_disposed;
	spin_unlock(&inode_lock);
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct list_head *dispose)
{
	struct list_head *next;
	int busy = 0, count = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		/*
		 * We can reschedule here without worrying about the list's
		 * consistency because the per-sb list of inodes must not
		 * change during umount anymore, and because iprune_sem keeps
		 * shrink_icache_memory() away.
		 */
		cond_resched_lock(&inode_lock);

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_sb_list);
		invalidate_inode_buffers(inode);
		if (!atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			count++;
			continue;
		}
		busy = 1;
	}
	/* only unused inodes may be cached with i_count zero */
	inodes_stat.nr_unused -= count;
	return busy;
}

/**
 * invalidate_inodes - discard the inodes on a device
 * @sb: superblock
 *
 * Discard all of the inodes for a given superblock. If the discard
 * fails because there are busy inodes then a non zero value is returned.
 * If the discard is successful all the inodes have been discarded.
 */
int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	down(&iprune_sem);
	spin_lock(&inode_lock);
	inotify_unmount_inodes(&sb->s_inodes);
	busy = invalidate_list(&sb->s_inodes, &throw_away);
	spin_unlock(&inode_lock);

	dispose_list(&throw_away);
	up(&iprune_sem);

	return busy;
}

EXPORT_SYMBOL(invalidate_inodes);

int __invalidate_device(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read semaphore so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb);
		drop_super(sb);
	}
	invalidate_bdev(bdev, 0);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

static int can_unuse(struct inode *inode)
{
	if (inode->i_state)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
 * Scan `goal' inodes on the unused list for freeable ones. They are moved to
 * a temporary list and then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  We expect the final iput() on that inode to add it to
 * the front of the inode_unused list.  So look for it there and if the
 * inode is still freeable, proceed.  The right inode is found 99.9% of the
 * time in testing on a 4-way.
 *
 * If the inode has metadata buffers attached to mapping->private_list then
 * try to remove them.
 */
static void prune_icache(int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_pruned = 0;
	int nr_scanned;
	unsigned long reap = 0;

	down(&iprune_sem);
	spin_lock(&inode_lock);
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
		struct inode *inode;

		if (list_empty(&inode_unused))
			break;

		inode = list_entry(inode_unused.prev, struct inode, i_list);

		if (inode->i_state || atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, &inode_unused);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_inode_pages(&inode->i_data);
			iput(inode);
			spin_lock(&inode_lock);

			if (inode != list_entry(inode_unused.next,
						struct inode, i_list))
				continue;	/* wrong inode or list_empty */
			if (!can_unuse(inode))
				continue;
		}
		list_move(&inode->i_list, &freeable);
		inode->i_state |= I_FREEING;
		nr_pruned++;
	}
	inodes_stat.nr_unused -= nr_pruned;
	spin_unlock(&inode_lock);

	dispose_list(&freeable);
	up(&iprune_sem);

	if (current_is_kswapd())
		mod_page_state(kswapd_inodesteal, reap);
	else
		mod_page_state(pginodesteal, reap);
}

/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
static int shrink_icache_memory(int nr, unsigned int gfp_mask)
{
	if (nr) {
		/*
		 * Nasty deadlock avoidance.  We may hold various FS locks,
		 * and we don't want to recurse into the FS that called us
		 * in clear_inode() and friends..
		 */
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_icache(nr);
	}
	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode * find_inode(struct super_block * sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data)
{
	struct hlist_node *node;
	struct inode * inode = NULL;

repeat:
	hlist_for_each (node, head) {
		inode = hlist_entry(node, struct inode, i_hash);
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	return node ? inode : NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode * inode = NULL;

repeat:
	hlist_for_each (node, head) {
		inode = hlist_entry(node, struct inode, i_hash);
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	return node ? inode : NULL;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 */
struct inode *new_inode(struct super_block *sb)
{
	static unsigned long last_ino;
	struct inode * inode;

	spin_lock_prefetch(&inode_lock);

	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode_lock);
		inodes_stat.nr_inodes++;
		list_add(&inode->i_list, &inode_in_use);
		list_add(&inode->i_sb_list, &sb->s_inodes);
		inode->i_ino = ++last_ino;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
	}
	return inode;
}

EXPORT_SYMBOL(new_inode);
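
/*
 * Illustrative sketch (not part of this file): a simple in-memory filesystem
 * typically wraps new_inode() in a helper that fills in ownership, times and
 * the file type before hashing or instantiating a dentry for the inode.
 * "myfs_get_inode" is a hypothetical name, loosely following the ramfs
 * pattern; init_special_inode() is defined at the end of this file:
 *
 *	struct inode *myfs_get_inode(struct super_block *sb, int mode, dev_t dev)
 *	{
 *		struct inode *inode = new_inode(sb);
 *
 *		if (inode) {
 *			inode->i_mode = mode;
 *			inode->i_uid = current->fsuid;
 *			inode->i_gid = current->fsgid;
 *			inode->i_atime = inode->i_mtime = inode->i_ctime =
 *				CURRENT_TIME;
 *			if (!S_ISREG(mode) && !S_ISDIR(mode))
 *				init_special_inode(inode, mode, dev);
 *		}
 *		return inode;
 *	}
 */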

void unlock_new_inode(struct inode *inode)
{
	/*
	 * This is special!  We do not need the spinlock
	 * when clearing I_LOCK, because we're guaranteed
	 * that nobody else tries to do anything about the
	 * state of the inode when it is locked, as we
	 * just created it (so there can be no old holders
	 * that haven't tested I_LOCK).
	 */
	inode->i_state &= ~(I_LOCK|I_NEW);
	wake_up_inode(inode);
}

EXPORT_SYMBOL(unlock_new_inode);

/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data)
{
	struct inode * inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode * old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			inodes_stat.nr_inodes++;
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_sb_list, &sb->s_inodes);
			hlist_add_head(&inode->i_hash, head);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_lock);
	destroy_inode(inode);
	return NULL;
}

/*
 * get_new_inode_fast is the fast path version of get_new_inode, see the
 * comment at iget_locked for details.
 */
static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino)
{
	struct inode * inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode * old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			inodes_stat.nr_inodes++;
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_sb_list, &sb->s_inodes);
			hlist_add_head(&inode->i_hash, head);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}

static inline unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
	return tmp & I_HASHMASK;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	static ino_t counter;
	struct inode *inode;
	struct hlist_head * head;
	ino_t res;
	spin_lock(&inode_lock);
retry:
	if (counter > max_reserved) {
		head = inode_hashtable + hash(sb,counter);
		res = counter++;
		inode = find_inode_fast(sb, head, res);
		if (!inode) {
			spin_unlock(&inode_lock);
			return res;
		}
	} else {
		counter = max_reserved + 1;
	}
	goto retry;
}

EXPORT_SYMBOL(iunique);
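
/*
 * Illustrative sketch (not part of this file): a filesystem with no stable
 * on-disk inode numbers might pair new_inode() with iunique() when
 * instantiating an inode.  MYFS_MAX_RESERVED_INO is a hypothetical constant
 * keeping a few low inode numbers free for special objects such as the root:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = iunique(sb, MYFS_MAX_RESERVED_INO);
 */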

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
		__iget(inode);
	else
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	spin_unlock(&inode_lock);
	return inode;
}

EXPORT_SYMBOL(igrab);

/**
 * ifind - internal function, you want ilookup5() or iget5().
 * @sb:		super block of file system to search
 * @head:	the head of the list to search
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 * @wait:	if true wait for the inode to be unlocked, if false do not
 *
 * ifind() searches for the inode specified by @data in the inode
 * cache. This is a generalized version of ifind_fast() for file systems where
 * the inode number is not sufficient for unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
static inline struct inode *ifind(struct super_block *sb,
		struct hlist_head *head, int (*test)(struct inode *, void *),
		void *data, const int wait)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, head, test, data);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		if (likely(wait))
			wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);
	return NULL;
}

/**
 * ifind_fast - internal function, you want ilookup() or iget().
 * @sb:		super block of file system to search
 * @head:	head of the list to search
 * @ino:	inode number to search for
 *
 * ifind_fast() searches for the inode @ino in the inode cache. This is for
 * file systems where the inode number is sufficient for unique identification
 * of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
static inline struct inode *ifind_fast(struct super_block *sb,
		struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode_fast(sb, head, ino);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);
	return NULL;
}

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * ilookup5_nowait() uses ifind() to search for the inode specified by @hashval
 * and @data in the inode cache. This is a generalized version of ilookup() for
 * file systems where the inode number is not sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.  Note, the inode lock is not waited upon so you have to be
 * very careful what you do with the returned inode.  You probably should be
 * using ilookup5() instead.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	return ifind(sb, head, test, data, 0);
}

EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * ilookup5() uses ifind() to search for the inode specified by @hashval and
 * @data in the inode cache. This is a generalized version of ilookup() for
 * file systems where the inode number is not sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode lock is waited upon and the inode is
 * returned with an incremented reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	return ifind(sb, head, test, data, 1);
}

EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
 * This is for file systems where the inode number is sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	return ifind_fast(sb, head, ino);
}

EXPORT_SYMBOL(ilookup);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * This is iget() without the read_inode() portion of get_new_inode().
 *
 * iget5_locked() uses ifind() to search for the inode specified by @hashval
 * and @data in the inode cache and if present it is returned with an increased
 * reference count. This is a generalized version of iget_locked() for file
 * systems where the inode number is not sufficient for unique identification
 * of an inode.
 *
 * If the inode is not in cache, get_new_inode() is called to allocate a new
 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
 * file system gets to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_lock held, so can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	inode = ifind(sb, head, test, data, 1);
	if (inode)
		return inode;
	/*
	 * get_new_inode() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode(sb, head, test, set, data);
}

EXPORT_SYMBOL(iget5_locked);
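
/*
 * Illustrative sketch (not part of this file): iget5_locked() callers supply
 * @test and @set callbacks that compare and initialize the filesystem's
 * private lookup key while inode_lock is held (so neither may sleep).  The
 * names below (MYFS_I(), myfs_test(), myfs_set(), the "objectid" key) are
 * hypothetical:
 *
 *	static int myfs_test(struct inode *inode, void *data)
 *	{
 *		return MYFS_I(inode)->objectid == *(unsigned long *)data;
 *	}
 *
 *	static int myfs_set(struct inode *inode, void *data)
 *	{
 *		MYFS_I(inode)->objectid = *(unsigned long *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hashval, myfs_test, myfs_set, &objectid);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... read the on-disk object and fill in the inode ...
 *		unlock_new_inode(inode);
 *	}
 */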

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * This is iget() without the read_inode() portion of get_new_inode_fast().
 *
 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
 * the inode cache and if present it is returned with an increased reference
 * count. This is for file systems where the inode number is sufficient for
 * unique identification of an inode.
 *
 * If the inode is not in cache, get_new_inode_fast() is called to allocate a
 * new inode and this is returned locked, hashed, and with the I_NEW flag set.
 * The file system gets to fill it in before unlocking it via
 * unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	inode = ifind_fast(sb, head, ino);
	if (inode)
		return inode;
	/*
	 * get_new_inode_fast() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode_fast(sb, head, ino);
}

EXPORT_SYMBOL(iget_locked);
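
/*
 * Illustrative sketch (not part of this file): the usual calling convention
 * for iget_locked() in a filesystem whose inode number is a sufficient key.
 * myfs_read_locked_inode() is a hypothetical helper that fills the inode
 * from disk; the I_NEW check and unlock_new_inode() follow the contract
 * described above:
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		myfs_read_locked_inode(inode);
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */
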
981
982/**
983 * __insert_inode_hash - hash an inode
984 * @inode: unhashed inode
985 * @hashval: unsigned long value used to locate this object in the
986 * inode_hashtable.
987 *
988 * Add an inode to the inode hash for this superblock.
989 */
990void __insert_inode_hash(struct inode *inode, unsigned long hashval)
991{
992 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
993 spin_lock(&inode_lock);
994 hlist_add_head(&inode->i_hash, head);
995 spin_unlock(&inode_lock);
996}
997
998EXPORT_SYMBOL(__insert_inode_hash);
999
1000/**
1001 * remove_inode_hash - remove an inode from the hash
1002 * @inode: inode to unhash
1003 *
1004 * Remove an inode from the superblock.
1005 */
1006void remove_inode_hash(struct inode *inode)
1007{
1008 spin_lock(&inode_lock);
1009 hlist_del_init(&inode->i_hash);
1010 spin_unlock(&inode_lock);
1011}
1012
1013EXPORT_SYMBOL(remove_inode_hash);
1014
1015/*
1016 * Tell the filesystem that this inode is no longer of any interest and should
1017 * be completely destroyed.
1018 *
1019 * We leave the inode in the inode hash table until *after* the filesystem's
1020 * ->delete_inode completes. This ensures that an iget (such as nfsd might
1021 * instigate) will always find up-to-date information either in the hash or on
1022 * disk.
1023 *
1024 * I_FREEING is set so that no-one will take a new reference to the inode while
1025 * it is being deleted.
1026 */
1027void generic_delete_inode(struct inode *inode)
1028{
1029 struct super_operations *op = inode->i_sb->s_op;
1030
1031 list_del_init(&inode->i_list);
1032 list_del_init(&inode->i_sb_list);
1033 inode->i_state|=I_FREEING;
1034 inodes_stat.nr_inodes--;
1035 spin_unlock(&inode_lock);
1036
1037 if (inode->i_data.nrpages)
1038 truncate_inode_pages(&inode->i_data, 0);
1039
1040 security_inode_delete(inode);
1041
1042 if (op->delete_inode) {
1043 void (*delete)(struct inode *) = op->delete_inode;
1044 if (!is_bad_inode(inode))
1045 DQUOT_INIT(inode);
1046 /* s_op->delete_inode internally recalls clear_inode() */
1047 delete(inode);
1048 } else
1049 clear_inode(inode);
1050 spin_lock(&inode_lock);
1051 hlist_del_init(&inode->i_hash);
1052 spin_unlock(&inode_lock);
1053 wake_up_inode(inode);
1054 if (inode->i_state != I_CLEAR)
1055 BUG();
1056 destroy_inode(inode);
1057}
1058
1059EXPORT_SYMBOL(generic_delete_inode);
1060
1061static void generic_forget_inode(struct inode *inode)
1062{
1063 struct super_block *sb = inode->i_sb;
1064
1065 if (!hlist_unhashed(&inode->i_hash)) {
1066 if (!(inode->i_state & (I_DIRTY|I_LOCK)))
1067 list_move(&inode->i_list, &inode_unused);
1068 inodes_stat.nr_unused++;
Alexander Viro991114c2005-06-23 00:09:01 -07001069 if (!sb || (sb->s_flags & MS_ACTIVE)) {
1070 spin_unlock(&inode_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 return;
Alexander Viro991114c2005-06-23 00:09:01 -07001072 }
1073 inode->i_state |= I_WILL_FREE;
1074 spin_unlock(&inode_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 write_inode_now(inode, 1);
1076 spin_lock(&inode_lock);
Alexander Viro991114c2005-06-23 00:09:01 -07001077 inode->i_state &= ~I_WILL_FREE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078 inodes_stat.nr_unused--;
1079 hlist_del_init(&inode->i_hash);
1080 }
1081 list_del_init(&inode->i_list);
1082 list_del_init(&inode->i_sb_list);
Alexander Viro991114c2005-06-23 00:09:01 -07001083 inode->i_state |= I_FREEING;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084 inodes_stat.nr_inodes--;
1085 spin_unlock(&inode_lock);
1086 if (inode->i_data.nrpages)
1087 truncate_inode_pages(&inode->i_data, 0);
1088 clear_inode(inode);
1089 destroy_inode(inode);
1090}
1091
1092/*
1093 * Normal UNIX filesystem behaviour: delete the
1094 * inode when the usage count drops to zero, and
1095 * i_nlink is zero.
1096 */
Mark Fashehcb2c0232005-07-07 17:56:03 -07001097void generic_drop_inode(struct inode *inode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098{
1099 if (!inode->i_nlink)
1100 generic_delete_inode(inode);
1101 else
1102 generic_forget_inode(inode);
1103}
1104
Mark Fashehcb2c0232005-07-07 17:56:03 -07001105EXPORT_SYMBOL_GPL(generic_drop_inode);
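
/*
 * Illustrative sketch (not part of this file): a filesystem that never wants
 * unused inodes to linger in the cache (for instance because the backing
 * object is cheap to recreate) can point s_op->drop_inode at
 * generic_delete_inode() instead of relying on this default.  "myfs_sops" is
 * a hypothetical name; generic_delete_inode() is exported above and
 * simple_statfs() comes from fs/libfs.c:
 *
 *	static struct super_operations myfs_sops = {
 *		.statfs		= simple_statfs,
 *		.drop_inode	= generic_delete_inode,
 *	};
 */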

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop()" function, defaulting to
 * the legacy UNIX filesystem behaviour..
 *
 * NOTE! NOTE! NOTE! We're called with the inode lock
 * held, and the drop function is supposed to release
 * the lock!
 */
static inline void iput_final(struct inode *inode)
{
	struct super_operations *op = inode->i_sb->s_op;
	void (*drop)(struct inode *) = generic_drop_inode;

	if (op && op->drop_inode)
		drop = op->drop_inode;
	drop(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		struct super_operations *op = inode->i_sb->s_op;

		BUG_ON(inode->i_state == I_CLEAR);

		if (op && op->put_inode)
			op->put_inode(inode);

		if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
			iput_final(inode);
	}
}

EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode * inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}

EXPORT_SYMBOL(bmap);
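
/*
 * Illustrative sketch (not part of this file): mapping a logical file block
 * to the on-disk block that holds it, e.g. when setting up a boot map or a
 * swap file.  A result of 0 means the block is not mapped (a hole, or the
 * filesystem does not implement ->bmap):
 *
 *	sector_t phys = bmap(inode, 0);
 *	if (!phys)
 *		return -EINVAL;
 */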

/**
 * update_atime - update the access time
 * @inode: inode accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void update_atime(struct inode *inode)
{
	struct timespec now;

	if (IS_NOATIME(inode))
		return;
	if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode))
		return;
	if (IS_RDONLY(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_atime, &now)) {
		inode->i_atime = now;
		mark_inode_dirty_sync(inode);
	} else {
		if (!timespec_equal(&inode->i_atime, &now))
			inode->i_atime = now;
	}
}

EXPORT_SYMBOL(update_atime);

/**
 * inode_update_time - update mtime and ctime
 * @inode: inode accessed
 * @ctime_too: update ctime too
 *
 * Update the mtime on an inode and mark it for writeback.
 * When ctime_too is specified update the ctime too.
 */

void inode_update_time(struct inode *inode, int ctime_too)
{
	struct timespec now;
	int sync_it = 0;

	if (IS_NOCMTIME(inode))
		return;
	if (IS_RDONLY(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = 1;
	inode->i_mtime = now;

	if (ctime_too) {
		if (!timespec_equal(&inode->i_ctime, &now))
			sync_it = 1;
		inode->i_ctime = now;
	}
	if (sync_it)
		mark_inode_dirty_sync(inode);
}

EXPORT_SYMBOL(inode_update_time);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}

EXPORT_SYMBOL(inode_needs_sync);

/*
 * Quota functions that want to walk the inode lists..
 */
#ifdef CONFIG_QUOTA

/* Function back in dquot.c */
int remove_inode_dquot_ref(struct inode *, int, struct list_head *);

void remove_dquot_ref(struct super_block *sb, int type,
			struct list_head *tofree_head)
{
	struct inode *inode;

	if (!sb->dq_op)
		return;	/* nothing to do */
	spin_lock(&inode_lock);	/* This lock is for inodes code */

	/*
	 * We don't have to lock against quota code - test IS_QUOTAINIT is
	 * just for speedup...
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list)
		if (!IS_NOQUOTA(inode))
			remove_inode_dquot_ref(inode, type, tofree_head);

	spin_unlock(&inode_lock);
}

#endif

int inode_wait(void *word)
{
	schedule();
	return 0;
}

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_LOCK is not set initially, a call to
 * wake_up_inode() after removing from the hash list will DTRT.
 *
 * This is called with inode_lock held.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_lock);
}

void wake_up_inode(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_LOCK);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(unsigned long mempages)
{
	int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_once,
				NULL);
	set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
		       mode);
}
EXPORT_SYMBOL(init_special_inode);