/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_file.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "unlinked.h"
#include "util.h"

/**
 * inode_attr_in - Copy attributes from the dinode into the VFS inode
 * @ip: The GFS2 inode (with embedded disk inode data)
 * @inode: The Linux VFS inode
 *
 */

static void inode_attr_in(struct gfs2_inode *ip, struct inode *inode)
{
	inode->i_ino = ip->i_num.no_formal_ino;

	switch (ip->i_di.di_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev = MKDEV(ip->i_di.di_major, ip->i_di.di_minor);
		break;
	default:
		inode->i_rdev = 0;
		break;
	};

	inode->i_mode = ip->i_di.di_mode;
	inode->i_nlink = ip->i_di.di_nlink;
	inode->i_uid = ip->i_di.di_uid;
	inode->i_gid = ip->i_di.di_gid;
	i_size_write(inode, ip->i_di.di_size);
	inode->i_atime.tv_sec = ip->i_di.di_atime;
	inode->i_mtime.tv_sec = ip->i_di.di_mtime;
	inode->i_ctime.tv_sec = ip->i_di.di_ctime;
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blksize = PAGE_SIZE;
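	/* di_blocks counts filesystem blocks, while i_blocks is kept in
	   basic (512-byte) blocks, hence the shift by the difference between
	   the fs block shift and GFS2_BASIC_BLOCK_SHIFT. */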
	inode->i_blocks = ip->i_di.di_blocks <<
		(ip->i_sbd->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);

	if (ip->i_di.di_flags & GFS2_DIF_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;

	if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

/**
 * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
 * @ip: The GFS2 inode (with embedded disk inode data)
 *
 */

void gfs2_inode_attr_in(struct gfs2_inode *ip)
{
	struct inode *inode;

	inode = gfs2_ip2v_lookup(ip);
	if (inode) {
		inode_attr_in(ip, inode);
		iput(inode);
	}
}

/**
 * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode
 * @ip: The GFS2 inode
 *
 * Only copy out the attributes that we want the VFS layer
 * to be able to modify.
 */

void gfs2_inode_attr_out(struct gfs2_inode *ip)
{
	struct inode *inode = ip->i_vnode;

	gfs2_assert_withdraw(ip->i_sbd,
		(ip->i_di.di_mode & S_IFMT) == (inode->i_mode & S_IFMT));
	ip->i_di.di_mode = inode->i_mode;
	ip->i_di.di_uid = inode->i_uid;
	ip->i_di.di_gid = inode->i_gid;
	ip->i_di.di_atime = inode->i_atime.tv_sec;
	ip->i_di.di_mtime = inode->i_mtime.tv_sec;
	ip->i_di.di_ctime = inode->i_ctime.tv_sec;
}

/**
 * gfs2_ip2v_lookup - Get the struct inode for a struct gfs2_inode
 * @ip: the struct gfs2_inode to get the struct inode for
 *
 * Returns: A VFS inode, or NULL if none
 */

struct inode *gfs2_ip2v_lookup(struct gfs2_inode *ip)
{
	struct inode *inode = NULL;

	gfs2_assert_warn(ip->i_sbd, test_bit(GIF_MIN_INIT, &ip->i_flags));

	spin_lock(&ip->i_spin);
	if (ip->i_vnode)
		inode = igrab(ip->i_vnode);
	spin_unlock(&ip->i_spin);

	return inode;
}

/**
 * gfs2_ip2v - Get/Create a struct inode for a struct gfs2_inode
 * @ip: the struct gfs2_inode to get the struct inode for
 *
 * Returns: A VFS inode, or NULL if no memory
 */

struct inode *gfs2_ip2v(struct gfs2_inode *ip)
{
	struct inode *inode, *tmp;

	inode = gfs2_ip2v_lookup(ip);
	if (inode)
		return inode;

	tmp = new_inode(ip->i_sbd->sd_vfs);
	if (!tmp)
		return NULL;

	inode_attr_in(ip, tmp);

	if (S_ISREG(ip->i_di.di_mode)) {
		tmp->i_op = &gfs2_file_iops;
		tmp->i_fop = &gfs2_file_fops;
		tmp->i_mapping->a_ops = &gfs2_file_aops;
	} else if (S_ISDIR(ip->i_di.di_mode)) {
		tmp->i_op = &gfs2_dir_iops;
		tmp->i_fop = &gfs2_dir_fops;
	} else if (S_ISLNK(ip->i_di.di_mode)) {
		tmp->i_op = &gfs2_symlink_iops;
	} else {
		tmp->i_op = &gfs2_dev_iops;
		init_special_inode(tmp, tmp->i_mode, tmp->i_rdev);
	}

	tmp->u.generic_ip = NULL;

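	/* Another thread may be attaching a VFS inode to this gfs2_inode at
	   the same time, or an old vnode may still be on its way out.  Loop
	   until we can either grab that inode or install the new one. */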
	for (;;) {
		spin_lock(&ip->i_spin);
		if (!ip->i_vnode)
			break;
		inode = igrab(ip->i_vnode);
		spin_unlock(&ip->i_spin);

		if (inode) {
			iput(tmp);
			return inode;
		}
		yield();
	}

	inode = tmp;

	gfs2_inode_hold(ip);
	ip->i_vnode = inode;
	inode->u.generic_ip = ip;

	spin_unlock(&ip->i_spin);

	insert_inode_hash(inode);

	return inode;
}

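/* The inode hash is keyed on no_formal_ino, so a hash match still has to be
   confirmed against the dinode block address (no_addr) in iget_test(). */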
static int iget_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = inode->u.generic_ip;
	struct gfs2_inum *inum = (struct gfs2_inum *)opaque;

	if (ip && ip->i_num.no_addr == inum->no_addr)
		return 1;

	return 0;
}

struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum)
{
	return ilookup5(sb, (unsigned long)inum->no_formal_ino,
			iget_test, inum);
}

void gfs2_inode_min_init(struct gfs2_inode *ip, unsigned int type)
{
	spin_lock(&ip->i_spin);
	if (!test_and_set_bit(GIF_MIN_INIT, &ip->i_flags)) {
		ip->i_di.di_nlink = 1;
		ip->i_di.di_mode = DT2IF(type);
	}
	spin_unlock(&ip->i_spin);
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (gfs2_metatype_check(ip->i_sbd, dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	spin_lock(&ip->i_spin);
	gfs2_dinode_in(&ip->i_di, dibh->b_data);
	set_bit(GIF_MIN_INIT, &ip->i_flags);
	spin_unlock(&ip->i_spin);

	brelse(dibh);

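	/* The dinode must describe the block it was read from; if the formal
	   inode number no longer matches, the inode was deleted and its block
	   reused, so the caller gets -ESTALE. */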
	if (ip->i_num.no_addr != ip->i_di.di_num.no_addr) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}
	if (ip->i_num.no_formal_ino != ip->i_di.di_num.no_formal_ino)
		return -ESTALE;

	ip->i_vn = ip->i_gl->gl_vn;

	return 0;
}

/**
 * inode_create - create a struct gfs2_inode
 * @i_gl: The glock covering the inode
 * @inum: The inode number
 * @io_gl: the iopen glock to acquire/hold (using holder in new gfs2_inode)
 * @io_state: the state the iopen glock should be acquired in
 * @ipp: pointer to put the returned inode in
 *
 * Returns: errno
 */

static int inode_create(struct gfs2_glock *i_gl, struct gfs2_inum *inum,
			struct gfs2_glock *io_gl, unsigned int io_state,
			struct gfs2_inode **ipp)
{
	struct gfs2_sbd *sdp = i_gl->gl_sbd;
	struct gfs2_inode *ip;
	int error = 0;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return -ENOMEM;
	memset(ip, 0, sizeof(struct gfs2_inode));

	ip->i_num = *inum;

	atomic_set(&ip->i_count, 1);

	ip->i_vn = i_gl->gl_vn - 1;

	ip->i_gl = i_gl;
	ip->i_sbd = sdp;

	spin_lock_init(&ip->i_spin);
	init_rwsem(&ip->i_rw_mutex);

	ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);

	error = gfs2_glock_nq_init(io_gl,
				   io_state, GL_LOCAL_EXCL | GL_EXACT,
				   &ip->i_iopen_gh);
	if (error)
		goto fail;
	ip->i_iopen_gh.gh_owner = NULL;

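	/* Cross-link the iopen glock and the inode glock: io_gl->gl_object
	   points at i_gl and i_gl->gl_object at the new gfs2_inode, each link
	   pinned by an extra glock reference that gfs2_inode_destroy() drops
	   again. */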
	spin_lock(&io_gl->gl_spin);
	gfs2_glock_hold(i_gl);
	io_gl->gl_object = i_gl;
	spin_unlock(&io_gl->gl_spin);

	gfs2_glock_hold(i_gl);
	i_gl->gl_object = ip;

	atomic_inc(&sdp->sd_inode_count);

	*ipp = ip;

	return 0;

 fail:
	gfs2_meta_cache_flush(ip);
	kmem_cache_free(gfs2_inode_cachep, ip);
	*ipp = NULL;

	return error;
}

/**
 * gfs2_inode_get - Create or get a reference on an inode
 * @i_gl: The glock covering the inode
 * @inum: The inode number
 * @create:
 * @ipp: pointer to put the returned inode in
 *
 * Returns: errno
 */

int gfs2_inode_get(struct gfs2_glock *i_gl, struct gfs2_inum *inum, int create,
		   struct gfs2_inode **ipp)
{
	struct gfs2_sbd *sdp = i_gl->gl_sbd;
	struct gfs2_glock *io_gl;
	int error = 0;

	gfs2_glmutex_lock(i_gl);

	*ipp = i_gl->gl_object;
	if (*ipp) {
		error = -ESTALE;
		if ((*ipp)->i_num.no_formal_ino != inum->no_formal_ino)
			goto out;
		atomic_inc(&(*ipp)->i_count);
		error = 0;
		goto out;
	}

	if (!create)
		goto out;

	error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_iopen_glops,
			       CREATE, &io_gl);
	if (!error) {
		error = inode_create(i_gl, inum, io_gl, LM_ST_SHARED, ipp);
		gfs2_glock_put(io_gl);
	}

 out:
	gfs2_glmutex_unlock(i_gl);

	return error;
}

void gfs2_inode_hold(struct gfs2_inode *ip)
{
	gfs2_assert(ip->i_sbd, atomic_read(&ip->i_count) > 0);
	atomic_inc(&ip->i_count);
}

void gfs2_inode_put(struct gfs2_inode *ip)
{
	gfs2_assert(ip->i_sbd, atomic_read(&ip->i_count) > 0);
	atomic_dec(&ip->i_count);
}

void gfs2_inode_destroy(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;
	struct gfs2_glock *i_gl = ip->i_gl;

	gfs2_assert_warn(sdp, !atomic_read(&ip->i_count));
	gfs2_assert(sdp, io_gl->gl_object == i_gl);

	spin_lock(&io_gl->gl_spin);
	io_gl->gl_object = NULL;
	gfs2_glock_put(i_gl);
	spin_unlock(&io_gl->gl_spin);

	gfs2_glock_dq_uninit(&ip->i_iopen_gh);

	gfs2_meta_cache_flush(ip);
	kmem_cache_free(gfs2_inode_cachep, ip);

	i_gl->gl_object = NULL;
	gfs2_glock_put(i_gl);

	atomic_dec(&sdp->sd_inode_count);
}

static int dinode_dealloc(struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (ip->i_di.di_blocks != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_UNLINKED +
				 RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	gfs2_trans_add_gl(ip->i_gl);

	gfs2_free_di(rgd, ip);

	error = gfs2_unlinked_ondisk_rm(sdp, ul);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

 out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);

 out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);

 out_qs:
	gfs2_quota_unhold(ip);

 out:
	gfs2_alloc_put(ip);

	return error;
}

/**
 * inode_dealloc - Deallocate all on-disk blocks for an inode (dinode)
 * @sdp: the filesystem
 * @ul: the unlinked entry describing the inode to deallocate
 * @io_gh: a holder for the iopen glock for this inode
 *
 * Returns: errno
 */

static int inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul,
			 struct gfs2_holder *io_gh)
{
	struct gfs2_inode *ip;
	struct gfs2_holder i_gh;
	int error;

	error = gfs2_glock_nq_num(sdp,
				  ul->ul_ut.ut_inum.no_addr, &gfs2_inode_glops,
				  LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	/* We reacquire the iopen lock here to avoid a race with the NFS server
	   calling gfs2_read_inode() with the inode number of an inode we're in
	   the process of deallocating.  And we can't keep our hold on the lock
	   from inode_dealloc_init() for deadlock reasons. */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY, io_gh);
	error = gfs2_glock_nq(io_gh);
	switch (error) {
	case 0:
		break;
	case GLR_TRYFAILED:
		error = 1;
	default:
		goto out;
	}

	gfs2_assert_warn(sdp, !i_gh.gh_gl->gl_object);
	error = inode_create(i_gh.gh_gl, &ul->ul_ut.ut_inum, io_gh->gh_gl,
			     LM_ST_EXCLUSIVE, &ip);

	gfs2_glock_dq(io_gh);

	if (error)
		goto out;

	error = gfs2_inode_refresh(ip);
	if (error)
		goto out_iput;

	if (ip->i_di.di_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		error = -EIO;
		goto out_iput;
	}

	if (S_ISDIR(ip->i_di.di_mode) &&
	    (ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_iput;
	}

	if (ip->i_di.di_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_iput;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_iput;
	}

	error = dinode_dealloc(ip, ul);
	if (error)
		goto out_iput;

 out_iput:
	gfs2_glmutex_lock(i_gh.gh_gl);
	gfs2_inode_put(ip);
	gfs2_inode_destroy(ip);
	gfs2_glmutex_unlock(i_gh.gh_gl);

 out:
	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * try_inode_dealloc - Try to deallocate an inode and all its blocks
 * @sdp: the filesystem
 * @ul: the unlinked entry describing the inode
 *
 * Returns: 0 on success, -errno on error, 1 on busy (inode open)
 */

static int try_inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	struct gfs2_holder io_gh;
	int error = 0;

	gfs2_try_toss_inode(sdp, &ul->ul_ut.ut_inum);

	error = gfs2_glock_nq_num(sdp,
				  ul->ul_ut.ut_inum.no_addr, &gfs2_iopen_glops,
				  LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &io_gh);
	switch (error) {
	case 0:
		break;
	case GLR_TRYFAILED:
		return 1;
	default:
		return error;
	}

	gfs2_glock_dq(&io_gh);
	error = inode_dealloc(sdp, ul, &io_gh);
	gfs2_holder_uninit(&io_gh);

	return error;
}

static int inode_dealloc_uninit(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder ri_gh, rgd_gh;
	int error;

	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ul->ul_ut.ut_inum.no_addr);
	if (!rgd) {
		gfs2_consist(sdp);
		error = -EIO;
		goto out;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rgd_gh);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp,
				 RES_RG_BIT + RES_UNLINKED + RES_STATFS,
				 0);
	if (error)
		goto out_gunlock;

	gfs2_free_uninit_di(rgd, ul->ul_ut.ut_inum.no_addr);
	gfs2_unlinked_ondisk_rm(sdp, ul);

	gfs2_trans_end(sdp);

 out_gunlock:
	gfs2_glock_dq_uninit(&rgd_gh);
 out:
	gfs2_glock_dq_uninit(&ri_gh);

	return error;
}

int gfs2_inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	if (ul->ul_ut.ut_flags & GFS2_UTF_UNINIT)
		return inode_dealloc_uninit(sdp, ul);
	else
		return try_inode_dealloc(sdp, ul);
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */

int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct buffer_head *dibh;
	uint32_t nlink;
	int error;

	nlink = ip->i_di.di_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_di.di_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	ip->i_di.di_nlink = nlink;
	ip->i_di.di_ctime = get_seconds();

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);

	return 0;
}

/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory to search in
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @inodep: Pointer to put the returned inode in
 *
 * There will always be a vnode (Linux VFS inode) for the @dir inode unless
 * @is_root is true.
 *
 * Returns: errno
 */

int gfs2_lookupi(struct inode *dir, struct qstr *name, int is_root,
		 struct inode **inodep)
{
	struct gfs2_inode *ipp;
	struct gfs2_inode *dip = dir->u.generic_ip;
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_holder d_gh;
	struct gfs2_inum inum;
	unsigned int type;
	struct gfs2_glock *gl;
	int error = 0;

	*inodep = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return -ENAMETOOLONG;

	if (gfs2_filecmp(name, ".", 1) ||
	    (gfs2_filecmp(name, "..", 2) && dir == sdp->sd_root_dir)) {
		gfs2_inode_hold(dip);
		ipp = dip;
		goto done;
	}

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	if (!is_root) {
		error = gfs2_repermission(dip->i_vnode, MAY_EXEC, NULL);
		if (error)
			goto out;
	}

	error = gfs2_dir_search(dip, name, &inum, &type);
	if (error)
		goto out;

	error = gfs2_glock_get(sdp, inum.no_addr, &gfs2_inode_glops,
			       CREATE, &gl);
	if (error)
		goto out;

	error = gfs2_inode_get(gl, &inum, CREATE, &ipp);
	if (!error)
		gfs2_inode_min_init(ipp, type);

	gfs2_glock_put(gl);

out:
	gfs2_glock_dq_uninit(&d_gh);
done:
	if (error == 0) {
		*inodep = gfs2_ip2v(ipp);
		if (!*inodep)
			error = -ENOMEM;
		gfs2_inode_put(ipp);
	}
	return error;
}

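/* Formal inode numbers are handed out from a per-node range kept in the
   dinode of sd_ir_inode.  pick_formal_ino_1() takes the next number from
   that range if any are left; otherwise pick_formal_ino_2() refills the
   range with GFS2_INUM_QUANTUM numbers from the cluster-wide counter in
   sd_inum_inode, taken under that inode's glock. */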
static int pick_formal_ino_1(struct gfs2_sbd *sdp, uint64_t *formal_ino)
{
	struct gfs2_inode *ip = sdp->sd_ir_inode->u.generic_ip;
	struct buffer_head *bh;
	struct gfs2_inum_range ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	return 1;
}

static int pick_formal_ino_2(struct gfs2_sbd *sdp, uint64_t *formal_ino)
{
	struct gfs2_inode *ip = sdp->sd_ir_inode->u.generic_ip;
	struct gfs2_inode *m_ip = sdp->sd_inum_inode->u.generic_ip;
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		uint64_t x, y;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		x = *(uint64_t *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(x);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		x = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(uint64_t *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = x;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

 out_brelse:
	brelse(bh);

 out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

 out:
	gfs2_glock_dq_uninit(&gh);

	return error;
}

static int pick_formal_ino(struct gfs2_sbd *sdp, uint64_t *inum)
{
	int error;

	error = pick_formal_ino_1(sdp, inum);
	if (error <= 0)
		return error;

	error = pick_formal_ino_2(sdp, inum);

	return error;
}

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode:
 *
 * Returns: errno
 */

static int create_ok(struct gfs2_inode *dip, struct qstr *name,
		     unsigned int mode)
{
	int error;

	error = gfs2_repermission(dip->i_vnode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_di.di_nlink)
		return -EPERM;

	error = gfs2_dir_search(dip, name, NULL, NULL);
	switch (error) {
	case -ENOENT:
		error = 0;
		break;
	case 0:
		return -EEXIST;
	default:
		return error;
	}

	if (dip->i_di.di_entries == (uint32_t)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_di.di_nlink == (uint32_t)-1)
		return -EMLINK;

	return 0;
}

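/* With the "suiddir" mount option, a setuid directory owned by a non-root
   user passes its owner on to new entries and strips the setuid/execute
   bits from files created there by other users; setgid directories hand
   down their group (and, for subdirectories, the setgid bit) as usual. */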
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (dip->i_sbd->sd_args.ar_suiddir &&
	    (dip->i_di.di_mode & S_ISUID) &&
	    dip->i_di.di_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_di.di_uid != current->fsuid)
			*mode &= ~07111;
		*uid = dip->i_di.di_uid;
	} else
		*uid = current->fsuid;

	if (dip->i_di.di_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_di.di_gid;
	} else
		*gid = current->fsgid;
}

static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	int error;

	gfs2_alloc_get(dip);

	dip->i_alloc.al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_UNLINKED +
				 RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	ul->ul_ut.ut_inum.no_addr = gfs2_alloc_di(dip);

	ul->ul_ut.ut_flags = GFS2_UTF_UNINIT;
	error = gfs2_unlinked_ondisk_add(sdp, ul);

	gfs2_trans_end(sdp);

 out_ipreserv:
	gfs2_inplace_release(dip);

 out:
	gfs2_alloc_put(dip);

	return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid:
 * @gid:
 *
 */

static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			struct gfs2_inum *inum, unsigned int mode,
			unsigned int uid, unsigned int gid)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_dinode *di;
	struct buffer_head *dibh;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = cpu_to_be32(0);
	di->di_size = cpu_to_be64(0);
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
	di->di_major = di->di_minor = cpu_to_be32(0);
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->__pad[0] = di->__pad[1] = 0;
	di->di_flags = cpu_to_be32(0);

	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
		    gfs2_tune_get(sdp, gt_new_files_directio))
			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_DIRECTIO);
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_height = cpu_to_be32(0);
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = cpu_to_be16(0);
	di->di_entries = cpu_to_be32(0);
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = cpu_to_be64(0);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	brelse(dibh);
}

static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	unsigned int uid, gid;
	int error;

	munge_mode_uid_gid(dip, &mode, &uid, &gid);

	gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_UNLINKED +
				 RES_QUOTA, 0);
	if (error)
		goto out_quota;

	ul->ul_ut.ut_flags = 0;
	error = gfs2_unlinked_ondisk_munge(sdp, ul);

	init_dinode(dip, gl, &ul->ul_ut.ut_inum,
		    mode, uid, gid);

	gfs2_quota_change(dip, +1, uid, gid);

	gfs2_trans_end(sdp);

 out_quota:
	gfs2_quota_unlock(dip);

 out:
	gfs2_alloc_put(dip);

	return error;
}

static int link_dinode(struct gfs2_inode *dip, struct qstr *name,
		       struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = gfs2_diradd_alloc_required(dip, name, &alloc_required);
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_di.di_uid,
					 dip->i_di.di_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp,
					 sdp->sd_max_dirres +
					 al->al_rgd->rd_ri.ri_length +
					 2 * RES_DINODE + RES_UNLINKED +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp,
					 RES_LEAF +
					 2 * RES_DINODE +
					 RES_UNLINKED, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(dip, name, &ip->i_num, IF2DT(ip->i_di.di_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	ip->i_di.di_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);

	error = gfs2_unlinked_ondisk_rm(sdp, ul);
	if (error)
		goto fail_end_trans;

	return 0;

 fail_end_trans:
	gfs2_trans_end(sdp);

 fail_ipreserv:
	if (dip->i_alloc.al_rgd)
		gfs2_inplace_release(dip);

 fail_quota_locks:
	gfs2_quota_unlock(dip);

 fail:
	gfs2_alloc_put(dip);

	return error;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the call succeeds, the glocks on both the directory and the new
 * file are held.  A transaction has been started and an inplace reservation
 * is held, as well.
 *
 * Returns: A new inode on success, or an ERR_PTR(errno) on failure
 */

struct inode *gfs2_createi(struct gfs2_holder *ghs, struct qstr *name,
			   unsigned int mode)
{
	struct inode *inode;
	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_unlinked *ul;
	struct gfs2_inode *ip;
	int error;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	error = gfs2_unlinked_get(sdp, &ul);
	if (error)
		return ERR_PTR(error);

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
	error = gfs2_glock_nq(ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	error = pick_formal_ino(sdp, &ul->ul_ut.ut_inum.no_formal_ino);
	if (error)
		goto fail_gunlock;

	error = alloc_dinode(dip, ul);
	if (error)
		goto fail_gunlock;

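	/* Inode glocks are acquired in block-number order to avoid deadlock.
	   If the new dinode's block sorts below the directory's, drop the
	   directory glock, take the new inode's glock first, re-take the
	   directory glock, and recheck that the create is still allowed. */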
	if (ul->ul_ut.ut_inum.no_addr < dip->i_num.no_addr) {
		gfs2_glock_dq(ghs);

		error = gfs2_glock_nq_num(sdp,
					  ul->ul_ut.ut_inum.no_addr,
					  &gfs2_inode_glops,
					  LM_ST_EXCLUSIVE, GL_SKIP,
					  ghs + 1);
		if (error) {
			gfs2_unlinked_put(sdp, ul);
			return ERR_PTR(error);
		}

		gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
		error = gfs2_glock_nq(ghs);
		if (error) {
			gfs2_glock_dq_uninit(ghs + 1);
			gfs2_unlinked_put(sdp, ul);
			return ERR_PTR(error);
		}

		error = create_ok(dip, name, mode);
		if (error)
			goto fail_gunlock2;
	} else {
		error = gfs2_glock_nq_num(sdp,
					  ul->ul_ut.ut_inum.no_addr,
					  &gfs2_inode_glops,
					  LM_ST_EXCLUSIVE, GL_SKIP,
					  ghs + 1);
		if (error)
			goto fail_gunlock;
	}

	error = make_dinode(dip, ghs[1].gh_gl, mode, ul);
	if (error)
		goto fail_gunlock2;

	error = gfs2_inode_get(ghs[1].gh_gl, &ul->ul_ut.ut_inum, CREATE, &ip);
	if (error)
		goto fail_gunlock2;

	error = gfs2_inode_refresh(ip);
	if (error)
		goto fail_iput;

	error = gfs2_acl_create(dip, ip);
	if (error)
		goto fail_iput;

	error = link_dinode(dip, name, ip, ul);
	if (error)
		goto fail_iput;

	gfs2_unlinked_put(sdp, ul);

	inode = gfs2_ip2v(ip);
	gfs2_inode_put(ip);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	return inode;

 fail_iput:
	gfs2_inode_put(ip);

 fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);

 fail_gunlock:
	gfs2_glock_dq(ghs);

 fail:
	gfs2_unlinked_put(sdp, ul);

	return ERR_PTR(error);
}

/**
 * gfs2_unlinki - Unlink a file
 * @dip: The inode of the directory
 * @name: The name of the file to be unlinked
 * @ip: The inode of the file to be removed
 *
 * Assumes Glocks on both dip and ip are held.
 *
 * Returns: errno
 */

int gfs2_unlinki(struct gfs2_inode *dip, struct qstr *name,
		 struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	int error;

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	/* If this inode is being unlinked from the directory structure,
	   we need to mark that in the log so that it isn't lost during
	   a crash. */

	if (!ip->i_di.di_nlink) {
		ul->ul_ut.ut_inum = ip->i_num;
		error = gfs2_unlinked_ondisk_add(sdp, ul);
		if (!error)
			set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
	}

	return error;
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */

int gfs2_rmdiri(struct gfs2_inode *dip, struct qstr *name,
		struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct qstr dotname;
	int error;

	if (ip->i_di.di_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	dotname.len = 1;
	dotname.name = ".";
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	dotname.len = 2;
	dotname.name = "..";
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -2);
	if (error)
		return error;

	/* This inode is being unlinked from the directory structure and
	   we need to mark that in the log so that it isn't lost during
	   a crash. */

	ul->ul_ut.ut_inum = ip->i_num;
	error = gfs2_unlinked_ondisk_add(sdp, ul);
	if (!error)
		set_bit(GLF_STICKY, &ip->i_gl->gl_flags);

	return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */

int gfs2_unlink_ok(struct gfs2_inode *dip, struct qstr *name,
		   struct gfs2_inode *ip)
{
	struct gfs2_inum inum;
	unsigned int type;
	int error;

	if (IS_IMMUTABLE(ip->i_vnode) || IS_APPEND(ip->i_vnode))
		return -EPERM;

	if ((dip->i_di.di_mode & S_ISVTX) &&
	    dip->i_di.di_uid != current->fsuid &&
	    ip->i_di.di_uid != current->fsuid &&
	    !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(dip->i_vnode))
		return -EPERM;

	error = gfs2_repermission(dip->i_vnode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	error = gfs2_dir_search(dip, name, &inum, &type);
	if (error)
		return error;

	if (!gfs2_inum_equal(&inum, &ip->i_num))
		return -ENOENT;

	if (IF2DT(ip->i_di.di_mode) != type) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	return 0;
}

/*
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */

int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
	struct gfs2_sbd *sdp = this->i_sbd;
	struct inode *dir = to->i_vnode;
	struct inode *tmp;
	struct qstr dotdot;
	int error = 0;

	memset(&dotdot, 0, sizeof(struct qstr));
	dotdot.name = "..";
	dotdot.len = 2;

	igrab(dir);

	for (;;) {
		if (dir == this->i_vnode) {
			error = -EINVAL;
			break;
		}
		if (dir == sdp->sd_root_dir) {
			error = 0;
			break;
		}

		error = gfs2_lookupi(dir, &dotdot, 1, &tmp);
		if (error)
			break;

		iput(dir);
		dir = tmp;
	}

	iput(dir);

	return error;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */

int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_di.di_size) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_di.di_size + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_KERNEL);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

 out_brelse:
	brelse(dibh);

 out:
	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *     conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
 * Update if the difference between the current time and the inode's current
 * atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */

int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int64_t curtime, quantum = gfs2_tune_get(sdp, gt_atime_quantum);
	unsigned int state;
	int flags;
	int error;

	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
		return -EINVAL;

	state = gh->gh_state;
	flags = gh->gh_flags;

	error = gfs2_glock_nq(gh);
	if (error)
		return error;

	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
	    (sdp->sd_vfs->s_flags & MS_RDONLY))
		return 0;

	curtime = get_seconds();
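	/* The glock was taken in the caller's requested state.  Only when
	   atime is stale by more than the mount's atime quantum do we trade
	   it for an exclusive hold so the dinode can be updated. */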
	if (curtime - ip->i_di.di_atime >= quantum) {
		gfs2_glock_dq(gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE,
				   gh->gh_flags & ~LM_FLAG_ANY,
				   gh);
		error = gfs2_glock_nq(gh);
		if (error)
			return error;

		/* Verify that atime hasn't been updated while we were
		   trying to get exclusive lock. */

		curtime = get_seconds();
		if (curtime - ip->i_di.di_atime >= quantum) {
			struct buffer_head *dibh;

			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
			if (error == -EROFS)
				return 0;
			if (error)
				goto fail;

			error = gfs2_meta_inode_buffer(ip, &dibh);
			if (error)
				goto fail_end_trans;

			ip->i_di.di_atime = curtime;

			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			gfs2_dinode_out(&ip->i_di, dibh->b_data);
			brelse(dibh);

			gfs2_trans_end(sdp);
		}

		/* If someone else has asked for the glock,
		   unlock and let them have it.  Then reacquire
		   in the original state. */
		if (gfs2_glock_is_blocking(gl)) {
			gfs2_glock_dq(gh);
			gfs2_holder_reinit(state, flags, gh);
			return gfs2_glock_nq(gh);
		}
	}

	return 0;

 fail_end_trans:
	gfs2_trans_end(sdp);

 fail:
	gfs2_glock_dq(gh);

	return error;
}

/**
 * glock_compare_atime - Compare two struct gfs2_glock structures for sort
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B
 *         -1 if A < B
 *          0 if A = B
 */

static int glock_compare_atime(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (gh_a->gh_state == LM_ST_SHARED &&
			 (gh_b->gh_flags & GL_ATIME))
			ret = 1;
	}

	return ret;
}

/**
 * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
 *      atime update
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder **p;
	unsigned int x;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		if (ghs->gh_flags & GL_ATIME)
			error = gfs2_glock_nq_atime(ghs);
		else
			error = gfs2_glock_nq(ghs);
		return error;
	}

	p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

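	/* Sorting the holders by lock number (with a fixed tie-break between
	   lock modes) gives every caller the same acquisition order, so two
	   tasks taking overlapping sets of glocks cannot deadlock against
	   each other. */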
	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		if (p[x]->gh_flags & GL_ATIME)
			error = gfs2_glock_nq_atime(p[x]);
		else
			error = gfs2_glock_nq(p[x]);

		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	kfree(p);

	return error;
}

/**
 * gfs2_try_toss_vnode - See if we can toss a vnode from memory
 * @ip: the inode
 *
 * Tries to prune the dentries for the vnode and drop it from the caches.
 */

void gfs2_try_toss_vnode(struct gfs2_inode *ip)
{
	struct inode *inode;

	inode = gfs2_ip2v_lookup(ip);
	if (!inode)
		return;

	d_prune_aliases(inode);

	if (S_ISDIR(ip->i_di.di_mode)) {
		struct list_head *head = &inode->i_dentry;
		struct dentry *d = NULL;

		spin_lock(&dcache_lock);
		if (list_empty(head))
			spin_unlock(&dcache_lock);
		else {
			d = list_entry(head->next, struct dentry, d_alias);
			dget_locked(d);
			spin_unlock(&dcache_lock);

			if (have_submounts(d))
				dput(d);
			else {
				shrink_dcache_parent(d);
				dput(d);
				d_prune_aliases(inode);
			}
		}
	}

	inode->i_nlink = 0;
	iput(inode);
}


static int
__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(ip->i_vnode, attr);
		gfs2_assert_warn(ip->i_sbd, !error);
		gfs2_inode_attr_out(ip);

		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}
	return error;
}

/**
 * gfs2_setattr_simple -
 * @ip:
 * @attr:
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */

int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	int error;

	if (current->journal_info)
		return __gfs2_setattr_simple(ip, attr);

	error = gfs2_trans_begin(ip->i_sbd, RES_DINODE, 0);
	if (error)
		return error;

	error = __gfs2_setattr_simple(ip, attr);

	gfs2_trans_end(ip->i_sbd);

	return error;
}

int gfs2_repermission(struct inode *inode, int mask, struct nameidata *nd)
{
	return permission(inode, mask, nd);
}