/*
 * QNX4 file system, Linux implementation.
 *
 * Version : 0.2.1
 *
 * Using parts of the xiafs filesystem.
 *
 * History :
 *
 * 01-06-1998 by Richard Frowijn : first release.
 * 20-06-1998 by Frank Denis : Linux 2.1.99+ support, boot signature, misc.
 * 30-06-1998 by Frank Denis : first step to write inodes.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/qnx4_fs.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <asm/uaccess.h>

#define QNX4_VERSION  4
#define QNX4_BMNAME   ".bitmap"

static struct super_operations qnx4_sops;

#ifdef CONFIG_QNX4FS_RW

int qnx4_sync_inode(struct inode *inode)
{
	int err = 0;
# if 0
	struct buffer_head *bh;

	bh = qnx4_update_inode(inode);
	if (bh && buffer_dirty(bh))
	{
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
		{
			printk ("IO error syncing qnx4 inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -1;
		}
		brelse (bh);
	} else if (!bh) {
		err = -1;
	}
# endif

	return err;
}

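/*
 * Called when the last link to an inode is gone: drop its cached pages,
 * truncate the file data and free the on-disk inode under the BKL.
 */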
static void qnx4_delete_inode(struct inode *inode)
{
	QNX4DEBUG(("qnx4: deleting inode [%lu]\n", (unsigned long) inode->i_ino));
	truncate_inode_pages(&inode->i_data, 0);
	inode->i_size = 0;
	qnx4_truncate(inode);
	lock_kernel();
	qnx4_free_inode(inode);
	unlock_kernel();
}

static void qnx4_write_super(struct super_block *sb)
{
	lock_kernel();
	QNX4DEBUG(("qnx4: write_super\n"));
	sb->s_dirt = 0;
	unlock_kernel();
}

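/*
 * Write an in-core inode back to its slot in the on-disk inode block,
 * converting the fields to little-endian disk format.
 */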
static int qnx4_write_inode(struct inode *inode, int unused)
{
	struct qnx4_inode_entry *raw_inode;
	int block, ino;
	struct buffer_head *bh;
	ino = inode->i_ino;

	QNX4DEBUG(("qnx4: write inode 1.\n"));
	if (inode->i_nlink == 0) {
		return 0;
	}
	if (!ino) {
		printk("qnx4: bad inode number on dev %s: %d is out of range\n",
		       inode->i_sb->s_id, ino);
		return -EIO;
	}
	QNX4DEBUG(("qnx4: write inode 2.\n"));
	block = ino / QNX4_INODES_PER_BLOCK;
	lock_kernel();
	if (!(bh = sb_bread(inode->i_sb, block))) {
		printk("qnx4: major problem: unable to read inode from dev "
		       "%s\n", inode->i_sb->s_id);
		unlock_kernel();
		return -EIO;
	}
	raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
		(ino % QNX4_INODES_PER_BLOCK);
	raw_inode->di_mode = cpu_to_le16(inode->i_mode);
	raw_inode->di_uid = cpu_to_le16(fs_high2lowuid(inode->i_uid));
	raw_inode->di_gid = cpu_to_le16(fs_high2lowgid(inode->i_gid));
	raw_inode->di_nlink = cpu_to_le16(inode->i_nlink);
	raw_inode->di_size = cpu_to_le32(inode->i_size);
	raw_inode->di_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	raw_inode->di_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->di_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->di_first_xtnt.xtnt_size = cpu_to_le32(inode->i_blocks);
	mark_buffer_dirty(bh);
	brelse(bh);
	unlock_kernel();
	return 0;
}

#endif

static void qnx4_put_super(struct super_block *sb);
static struct inode *qnx4_alloc_inode(struct super_block *sb);
static void qnx4_destroy_inode(struct inode *inode);
static void qnx4_read_inode(struct inode *);
static int qnx4_remount(struct super_block *sb, int *flags, char *data);
static int qnx4_statfs(struct dentry *, struct kstatfs *);

static struct super_operations qnx4_sops =
{
	.alloc_inode	= qnx4_alloc_inode,
	.destroy_inode	= qnx4_destroy_inode,
	.read_inode	= qnx4_read_inode,
	.put_super	= qnx4_put_super,
	.statfs		= qnx4_statfs,
	.remount_fs	= qnx4_remount,
#ifdef CONFIG_QNX4FS_RW
	.write_inode	= qnx4_write_inode,
	.delete_inode	= qnx4_delete_inode,
	.write_super	= qnx4_write_super,
#endif
};

static int qnx4_remount(struct super_block *sb, int *flags, char *data)
{
	struct qnx4_sb_info *qs;

	qs = qnx4_sb(sb);
	qs->Version = QNX4_VERSION;
#ifndef CONFIG_QNX4FS_RW
	*flags |= MS_RDONLY;
#endif
	if (*flags & MS_RDONLY) {
		return 0;
	}

	mark_buffer_dirty(qs->sb_buf);

	return 0;
}

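/*
 * Map logical block 'nr' of an inode to a device block and return it as
 * an (unread) buffer_head.  The allocation path for 'create' is compiled
 * out, so new blocks are never actually allocated here.
 */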
static struct buffer_head *qnx4_getblk(struct inode *inode, int nr,
				       int create)
{
	struct buffer_head *result = NULL;

	if ( nr >= 0 )
		nr = qnx4_block_map( inode, nr );
	if (nr) {
		result = sb_getblk(inode->i_sb, nr);
		return result;
	}
	if (!create) {
		return NULL;
	}
#if 0
	tmp = qnx4_new_block(inode->i_sb);
	if (!tmp) {
		return NULL;
	}
	result = sb_getblk(inode->i_sb, tmp);
	if (tst) {
		qnx4_free_block(inode->i_sb, tmp);
		brelse(result);
		goto repeat;
	}
	tst = tmp;
#endif
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return result;
}

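/*
 * Like qnx4_getblk(), but make sure the buffer has been read in and is
 * up to date before returning it; returns NULL on I/O error.
 */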
struct buffer_head *qnx4_bread(struct inode *inode, int block, int create)
{
	struct buffer_head *bh;

	bh = qnx4_getblk(inode, block, create);
	if (!bh || buffer_uptodate(bh)) {
		return bh;
	}
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh)) {
		return bh;
	}
	brelse(bh);

	return NULL;
}

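/*
 * get_block callback for the generic page cache routines: translate a
 * logical block of the file into a physical block on the device.
 * Allocating new blocks ('create') is not implemented yet.
 */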
static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create )
{
	unsigned long phys;

	QNX4DEBUG(("qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock));

	phys = qnx4_block_map( inode, iblock );
	if ( phys ) {
		// logical block is before EOF
		map_bh(bh, inode->i_sb, phys);
	} else if ( create ) {
		// to be done.
	}
	return 0;
}

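/*
 * Map a logical file block to its physical block number.  QNX4 keeps a
 * first extent in the inode itself, optionally followed by a chain of
 * extent blocks; walk that chain until the extent containing 'iblock'
 * is found.  Returns 0 if the block is not mapped.
 */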
unsigned long qnx4_block_map( struct inode *inode, long iblock )
{
	int ix;
	long offset, i_xblk;
	unsigned long block = 0;
	struct buffer_head *bh = NULL;
	struct qnx4_xblk *xblk = NULL;
	struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode);
	u16 nxtnt = le16_to_cpu(qnx4_inode->di_num_xtnts);

	if ( iblock < le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_size) ) {
		// iblock is in the first extent. This is easy.
		block = le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_blk) + iblock - 1;
	} else {
		// iblock is beyond first extent. We have to follow the extent chain.
		i_xblk = le32_to_cpu(qnx4_inode->di_xblk);
		offset = iblock - le32_to_cpu(qnx4_inode->di_first_xtnt.xtnt_size);
		ix = 0;
		while ( --nxtnt > 0 ) {
			if ( ix == 0 ) {
				// read next xtnt block.
				bh = sb_bread(inode->i_sb, i_xblk - 1);
				if ( !bh ) {
					QNX4DEBUG(("qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
					return -EIO;
				}
				xblk = (struct qnx4_xblk*)bh->b_data;
				if ( memcmp( xblk->xblk_signature, "IamXblk", 7 ) ) {
					QNX4DEBUG(("qnx4: block at %ld is not a valid xtnt\n", qnx4_inode->di_xblk));
					return -EIO;
				}
			}
			if ( offset < le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_size) ) {
				// got it!
				block = le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_blk) + offset - 1;
				break;
			}
			offset -= le32_to_cpu(xblk->xblk_xtnts[ix].xtnt_size);
			if ( ++ix >= xblk->xblk_num_xtnts ) {
				i_xblk = le32_to_cpu(xblk->xblk_next_xblk);
				ix = 0;
				brelse( bh );
				bh = NULL;
			}
		}
		if ( bh )
			brelse( bh );
	}

	QNX4DEBUG(("qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
	return block;
}

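/*
 * Report filesystem statistics.  The total block count is derived from
 * the size of the bitmap file (one bit per block).
 */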
static int qnx4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;

	lock_kernel();

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size) * 8;
	buf->f_bfree = qnx4_count_free_blocks(sb);
	buf->f_bavail = buf->f_bfree;
	buf->f_namelen = QNX4_NAME_MAX;

	unlock_kernel();

	return 0;
}

/*
 * Check the root directory of the filesystem to make sure
 * it really _is_ a qnx4 filesystem, and to check the size
 * of the directory entry.
 */
static const char *qnx4_checkroot(struct super_block *sb)
{
	struct buffer_head *bh;
	struct qnx4_inode_entry *rootdir;
	int rd, rl;
	int i, j;
	int found = 0;

	if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/') {
		return "no qnx4 filesystem (no root dir).";
	} else {
		QNX4DEBUG(("QNX4 filesystem found on dev %s.\n", sb->s_id));
		rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
		rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
		for (j = 0; j < rl; j++) {
			bh = sb_bread(sb, rd + j);	/* root dir, first block */
			if (bh == NULL) {
				return "unable to read root entry.";
			}
			for (i = 0; i < QNX4_INODES_PER_BLOCK; i++) {
				rootdir = (struct qnx4_inode_entry *) (bh->b_data + i * QNX4_DIR_ENTRY_SIZE);
				if (rootdir->di_fname != NULL) {
					QNX4DEBUG(("Rootdir entry found : [%s]\n", rootdir->di_fname));
					if (!strncmp(rootdir->di_fname, QNX4_BMNAME, sizeof QNX4_BMNAME)) {
						found = 1;
						qnx4_sb(sb)->BitMap = kmalloc( sizeof( struct qnx4_inode_entry ), GFP_KERNEL );
						if (!qnx4_sb(sb)->BitMap) {
							brelse (bh);
							return "not enough memory for bitmap inode";
						}
						memcpy( qnx4_sb(sb)->BitMap, rootdir, sizeof( struct qnx4_inode_entry ) );	/* keep bitmap inode known */
						break;
					}
				}
			}
			brelse(bh);
			if (found != 0) {
				break;
			}
		}
		if (found == 0) {
			return "bitmap file not found.";
		}
	}
	return NULL;
}

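/*
 * Read the superblock, verify the QNX4 signature and the root directory,
 * then set up the in-core superblock and root inode for the mount.
 */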
static int qnx4_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head *bh;
	struct inode *root;
	const char *errmsg;
	struct qnx4_sb_info *qs;

	qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	s->s_fs_info = qs;

	sb_set_blocksize(s, QNX4_BLOCK_SIZE);

	/* Check the superblock signature. Since the qnx4 code is
	   dangerous, we should leave as quickly as possible
	   if we don't belong here... */
	bh = sb_bread(s, 1);
	if (!bh) {
		printk("qnx4: unable to read the superblock\n");
		goto outnobh;
	}
	if ( le32_to_cpup((__le32*) bh->b_data) != QNX4_SUPER_MAGIC ) {
		if (!silent)
			printk("qnx4: wrong fsid in superblock.\n");
		goto out;
	}
	s->s_op = &qnx4_sops;
	s->s_magic = QNX4_SUPER_MAGIC;
#ifndef CONFIG_QNX4FS_RW
	s->s_flags |= MS_RDONLY;	/* Yup, read-only yet */
#endif
	qnx4_sb(s)->sb_buf = bh;
	qnx4_sb(s)->sb = (struct qnx4_super_block *) bh->b_data;


	/* check before allocating dentries, inodes, .. */
	errmsg = qnx4_checkroot(s);
	if (errmsg != NULL) {
		if (!silent)
			printk("qnx4: %s\n", errmsg);
		goto out;
	}

	/* does root not have inode number QNX4_ROOT_INO ?? */
	root = iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
	if (!root) {
		printk("qnx4: get inode failed\n");
		goto out;
	}

	s->s_root = d_alloc_root(root);
	if (s->s_root == NULL)
		goto outi;

	brelse(bh);

	return 0;

      outi:
	iput(root);
      out:
	brelse(bh);
      outnobh:
	kfree(qs);
	s->s_fs_info = NULL;
	return -EINVAL;
}

static void qnx4_put_super(struct super_block *sb)
{
	struct qnx4_sb_info *qs = qnx4_sb(sb);
	kfree( qs->BitMap );
	kfree( qs );
	sb->s_fs_info = NULL;
	return;
}

static int qnx4_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,qnx4_get_block, wbc);
}
static int qnx4_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,qnx4_get_block);
}
static int qnx4_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct qnx4_inode_info *qnx4_inode = qnx4_i(page->mapping->host);
	return cont_prepare_write(page, from, to, qnx4_get_block,
				  &qnx4_inode->mmu_private);
}
static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,qnx4_get_block);
}
static const struct address_space_operations qnx4_aops = {
	.readpage	= qnx4_readpage,
	.writepage	= qnx4_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= qnx4_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= qnx4_bmap
};

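/*
 * Read an inode from disk, fill the VFS inode from the raw on-disk entry
 * and hook up the right operations for its file type.
 */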
static void qnx4_read_inode(struct inode *inode)
{
	struct buffer_head *bh;
	struct qnx4_inode_entry *raw_inode;
	int block, ino;
	struct super_block *sb = inode->i_sb;
	struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode);

	ino = inode->i_ino;
	inode->i_mode = 0;

	QNX4DEBUG(("Reading inode : [%d]\n", ino));
	if (!ino) {
		printk("qnx4: bad inode number on dev %s: %d is out of range\n",
		       sb->s_id, ino);
		return;
	}
	block = ino / QNX4_INODES_PER_BLOCK;

	if (!(bh = sb_bread(sb, block))) {
		printk("qnx4: major problem: unable to read inode from dev "
		       "%s\n", sb->s_id);
		return;
	}
	raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
		(ino % QNX4_INODES_PER_BLOCK);

	inode->i_mode = le16_to_cpu(raw_inode->di_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->di_uid);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->di_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->di_nlink);
	inode->i_size = le32_to_cpu(raw_inode->di_size);
	inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->di_mtime);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_sec = le32_to_cpu(raw_inode->di_atime);
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->di_ctime);
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);

	memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE);
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &qnx4_file_inode_operations;
		inode->i_fop = &qnx4_file_operations;
		inode->i_mapping->a_ops = &qnx4_aops;
		qnx4_i(inode)->mmu_private = inode->i_size;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &qnx4_dir_inode_operations;
		inode->i_fop = &qnx4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &qnx4_aops;
		qnx4_i(inode)->mmu_private = inode->i_size;
	} else
		printk("qnx4: bad inode %d on dev %s\n",ino,sb->s_id);
	brelse(bh);
}

static kmem_cache_t *qnx4_inode_cachep;

static struct inode *qnx4_alloc_inode(struct super_block *sb)
{
	struct qnx4_inode_info *ei;
	ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void qnx4_destroy_inode(struct inode *inode)
{
	kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
}

static void init_once(void *foo, kmem_cache_t * cachep,
		      unsigned long flags)
{
	struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	qnx4_inode_cachep = kmem_cache_create("qnx4_inode_cache",
					      sizeof(struct qnx4_inode_info),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      init_once, NULL);
	if (qnx4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(qnx4_inode_cachep);
}

static int qnx4_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, qnx4_fill_super,
			   mnt);
}

static struct file_system_type qnx4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "qnx4",
	.get_sb		= qnx4_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

static int __init init_qnx4_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		return err;

	err = register_filesystem(&qnx4_fs_type);
	if (err) {
		destroy_inodecache();
		return err;
	}

	printk("QNX4 filesystem 0.2.3 registered.\n");
	return 0;
}

static void __exit exit_qnx4_fs(void)
{
	unregister_filesystem(&qnx4_fs_type);
	destroy_inodecache();
}

module_init(init_qnx4_fs)
module_exit(exit_qnx4_fs)
MODULE_LICENSE("GPL");