/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_file.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "unlinked.h"

/**
 * inode_attr_in - Copy attributes from the dinode into the VFS inode
 * @ip: The GFS2 inode (with embedded disk inode data)
 * @inode: The Linux VFS inode
 *
 */

static void inode_attr_in(struct gfs2_inode *ip, struct inode *inode)
{
        inode->i_ino = ip->i_num.no_formal_ino;

        switch (ip->i_di.di_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                inode->i_rdev = MKDEV(ip->i_di.di_major, ip->i_di.di_minor);
                break;
        default:
                inode->i_rdev = 0;
                break;
        }

        inode->i_mode = ip->i_di.di_mode;
        inode->i_nlink = ip->i_di.di_nlink;
        inode->i_uid = ip->i_di.di_uid;
        inode->i_gid = ip->i_di.di_gid;
        i_size_write(inode, ip->i_di.di_size);
        inode->i_atime.tv_sec = ip->i_di.di_atime;
        inode->i_mtime.tv_sec = ip->i_di.di_mtime;
        inode->i_ctime.tv_sec = ip->i_di.di_ctime;
        inode->i_atime.tv_nsec = 0;
        inode->i_mtime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
        inode->i_blksize = PAGE_SIZE;
        inode->i_blocks = ip->i_di.di_blocks <<
                (ip->i_sbd->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);

        if (ip->i_di.di_flags & GFS2_DIF_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;

        if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
}

/**
 * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
 * @ip: The GFS2 inode (with embedded disk inode data)
 *
 */

void gfs2_inode_attr_in(struct gfs2_inode *ip)
{
        struct inode *inode;

        inode = gfs2_ip2v_lookup(ip);
        if (inode) {
                inode_attr_in(ip, inode);
                iput(inode);
        }
}

/**
 * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode
 * @ip: The GFS2 inode
 *
 * Only copy out the attributes that we want the VFS layer
 * to be able to modify.
 */

void gfs2_inode_attr_out(struct gfs2_inode *ip)
{
        struct inode *inode = ip->i_vnode;

        gfs2_assert_withdraw(ip->i_sbd,
                (ip->i_di.di_mode & S_IFMT) == (inode->i_mode & S_IFMT));
        ip->i_di.di_mode = inode->i_mode;
        ip->i_di.di_uid = inode->i_uid;
        ip->i_di.di_gid = inode->i_gid;
        ip->i_di.di_atime = inode->i_atime.tv_sec;
        ip->i_di.di_mtime = inode->i_mtime.tv_sec;
        ip->i_di.di_ctime = inode->i_ctime.tv_sec;
}

/**
 * gfs2_ip2v_lookup - Get the struct inode for a struct gfs2_inode
 * @ip: the struct gfs2_inode to get the struct inode for
 *
 * Returns: A VFS inode, or NULL if none
 */

struct inode *gfs2_ip2v_lookup(struct gfs2_inode *ip)
{
        struct inode *inode = NULL;

        gfs2_assert_warn(ip->i_sbd, test_bit(GIF_MIN_INIT, &ip->i_flags));

        spin_lock(&ip->i_spin);
        if (ip->i_vnode)
                inode = igrab(ip->i_vnode);
        spin_unlock(&ip->i_spin);

        return inode;
}
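
/*
 * Illustrative usage (a sketch, not code from this file): callers that only
 * want an existing VFS inode, and do not want to create one, pair the lookup
 * with iput() once they are done:
 *
 *        struct inode *inode = gfs2_ip2v_lookup(ip);
 *        if (inode) {
 *                ... use the VFS inode ...
 *                iput(inode);
 *        }
 *
 * Note that igrab() can still return NULL if the inode is midway through
 * being freed, which is why the NULL check is needed even when ip->i_vnode
 * is set.
 */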

/**
 * gfs2_ip2v - Get/Create a struct inode for a struct gfs2_inode
 * @ip: the struct gfs2_inode to get the struct inode for
 *
 * Returns: A VFS inode, or NULL if no mem
 */

struct inode *gfs2_ip2v(struct gfs2_inode *ip)
{
        struct inode *inode, *tmp;

        inode = gfs2_ip2v_lookup(ip);
        if (inode)
                return inode;

        tmp = new_inode(ip->i_sbd->sd_vfs);
        if (!tmp)
                return NULL;

        inode_attr_in(ip, tmp);

        if (S_ISREG(ip->i_di.di_mode)) {
                tmp->i_op = &gfs2_file_iops;
                tmp->i_fop = &gfs2_file_fops;
                tmp->i_mapping->a_ops = &gfs2_file_aops;
        } else if (S_ISDIR(ip->i_di.di_mode)) {
                tmp->i_op = &gfs2_dir_iops;
                tmp->i_fop = &gfs2_dir_fops;
        } else if (S_ISLNK(ip->i_di.di_mode)) {
                tmp->i_op = &gfs2_symlink_iops;
        } else {
                tmp->i_op = &gfs2_dev_iops;
                init_special_inode(tmp, tmp->i_mode, tmp->i_rdev);
        }

        set_v2ip(tmp, NULL);

        for (;;) {
                spin_lock(&ip->i_spin);
                if (!ip->i_vnode)
                        break;
                inode = igrab(ip->i_vnode);
                spin_unlock(&ip->i_spin);

                if (inode) {
                        iput(tmp);
                        return inode;
                }
                yield();
        }

        inode = tmp;

        gfs2_inode_hold(ip);
        ip->i_vnode = inode;
        set_v2ip(inode, ip);

        spin_unlock(&ip->i_spin);

        insert_inode_hash(inode);

        return inode;
}
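
/*
 * Note on the loop in gfs2_ip2v() (explanatory comment added here; the logic
 * itself is unchanged): if another task attached a VFS inode to @ip after the
 * initial lookup, we try to grab that one and discard our freshly allocated
 * @tmp.  If igrab() fails because that inode is midway through being freed,
 * we yield() and retry until either the slot is empty (so @tmp is installed
 * while still holding ip->i_spin) or the grab succeeds.
 */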

static int iget_test(struct inode *inode, void *opaque)
{
        struct gfs2_inode *ip = get_v2ip(inode);
        struct gfs2_inum *inum = (struct gfs2_inum *)opaque;

        if (ip && ip->i_num.no_addr == inum->no_addr)
                return 1;

        return 0;
}

struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum)
{
        return ilookup5(sb, (unsigned long)inum->no_formal_ino,
                        iget_test, inum);
}
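
/*
 * Observation (comment only, no behavioural change): the hash lookup above is
 * keyed on no_formal_ino, but iget_test() confirms the match by comparing
 * no_addr, so a hash collision on the formal inode number cannot return the
 * wrong GFS2 inode.
 */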

void gfs2_inode_min_init(struct gfs2_inode *ip, unsigned int type)
{
        spin_lock(&ip->i_spin);
        if (!test_and_set_bit(GIF_MIN_INIT, &ip->i_flags)) {
                ip->i_di.di_nlink = 1;
                ip->i_di.di_mode = DT2IF(type);
        }
        spin_unlock(&ip->i_spin);
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        if (gfs2_metatype_check(ip->i_sbd, dibh, GFS2_METATYPE_DI)) {
                brelse(dibh);
                return -EIO;
        }

        spin_lock(&ip->i_spin);
        gfs2_dinode_in(&ip->i_di, dibh->b_data);
        set_bit(GIF_MIN_INIT, &ip->i_flags);
        spin_unlock(&ip->i_spin);

        brelse(dibh);

        if (ip->i_num.no_addr != ip->i_di.di_num.no_addr) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(&ip->i_di);
                return -EIO;
        }
        if (ip->i_num.no_formal_ino != ip->i_di.di_num.no_formal_ino)
                return -ESTALE;

        ip->i_vn = ip->i_gl->gl_vn;

        return 0;
}
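
/*
 * The version number handshake (explanatory note): inode_create() below
 * initializes ip->i_vn to gl_vn - 1, so the in-core dinode is considered
 * stale until gfs2_inode_refresh() has read it from disk and set
 * ip->i_vn = gl_vn.  Callers that revalidate after reacquiring the glock can
 * therefore skip the re-read whenever ip->i_vn still matches ip->i_gl->gl_vn.
 */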

/**
 * inode_create - create a struct gfs2_inode
 * @i_gl: The glock covering the inode
 * @inum: The inode number
 * @io_gl: the iopen glock to acquire/hold (using holder in new gfs2_inode)
 * @io_state: the state the iopen glock should be acquired in
 * @ipp: pointer to put the returned inode in
 *
 * Returns: errno
 */

static int inode_create(struct gfs2_glock *i_gl, struct gfs2_inum *inum,
                        struct gfs2_glock *io_gl, unsigned int io_state,
                        struct gfs2_inode **ipp)
{
        struct gfs2_sbd *sdp = i_gl->gl_sbd;
        struct gfs2_inode *ip;
        int error = 0;

        ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
        if (!ip)
                return -ENOMEM;
        memset(ip, 0, sizeof(struct gfs2_inode));

        ip->i_num = *inum;

        atomic_set(&ip->i_count, 1);

        ip->i_vn = i_gl->gl_vn - 1;

        ip->i_gl = i_gl;
        ip->i_sbd = sdp;

        spin_lock_init(&ip->i_spin);
        init_rwsem(&ip->i_rw_mutex);

        ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);

        error = gfs2_glock_nq_init(io_gl,
                                   io_state, GL_LOCAL_EXCL | GL_EXACT,
                                   &ip->i_iopen_gh);
        if (error)
                goto fail;
        ip->i_iopen_gh.gh_owner = NULL;

        spin_lock(&io_gl->gl_spin);
        gfs2_glock_hold(i_gl);
        set_gl2gl(io_gl, i_gl);
        spin_unlock(&io_gl->gl_spin);

        gfs2_glock_hold(i_gl);
        set_gl2ip(i_gl, ip);

        atomic_inc(&sdp->sd_inode_count);

        *ipp = ip;

        return 0;

 fail:
        gfs2_meta_cache_flush(ip);
        kmem_cache_free(gfs2_inode_cachep, ip);
        *ipp = NULL;

        return error;
}
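
/*
 * Reference-count bookkeeping in inode_create() (descriptive note): the inode
 * glock is held twice on success, once for the io_gl -> i_gl back-pointer set
 * with set_gl2gl() and once for the i_gl -> ip association set with
 * set_gl2ip().  gfs2_inode_destroy() drops both again, so the two functions
 * have to stay symmetric.
 */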

/**
 * gfs2_inode_get - Create or get a reference on an inode
 * @i_gl: The glock covering the inode
 * @inum: The inode number
 * @create: if set, create a new struct gfs2_inode when none is attached yet
 * @ipp: pointer to put the returned inode in
 *
 * Returns: errno
 */

int gfs2_inode_get(struct gfs2_glock *i_gl, struct gfs2_inum *inum, int create,
                   struct gfs2_inode **ipp)
{
        struct gfs2_sbd *sdp = i_gl->gl_sbd;
        struct gfs2_glock *io_gl;
        int error = 0;

        gfs2_glmutex_lock(i_gl);

        *ipp = get_gl2ip(i_gl);
        if (*ipp) {
                error = -ESTALE;
                if ((*ipp)->i_num.no_formal_ino != inum->no_formal_ino)
                        goto out;
                atomic_inc(&(*ipp)->i_count);
                error = 0;
                goto out;
        }

        if (!create)
                goto out;

        error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_iopen_glops,
                               CREATE, &io_gl);
        if (!error) {
                error = inode_create(i_gl, inum, io_gl, LM_ST_SHARED, ipp);
                gfs2_glock_put(io_gl);
        }

 out:
        gfs2_glmutex_unlock(i_gl);

        return error;
}
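
/*
 * Typical pairing (a sketch of the convention used in this file, not a
 * verbatim caller): a reference taken through gfs2_inode_get() is dropped
 * again with gfs2_inode_put(), and gfs2_inode_destroy() asserts that the
 * count has reached zero before tearing the structure down:
 *
 *        struct gfs2_inode *ip;
 *        int error = gfs2_inode_get(i_gl, &inum, CREATE, &ip);
 *        if (!error) {
 *                ... use ip ...
 *                gfs2_inode_put(ip);
 *        }
 */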

void gfs2_inode_hold(struct gfs2_inode *ip)
{
        gfs2_assert(ip->i_sbd, atomic_read(&ip->i_count) > 0);
        atomic_inc(&ip->i_count);
}

void gfs2_inode_put(struct gfs2_inode *ip)
{
        gfs2_assert(ip->i_sbd, atomic_read(&ip->i_count) > 0);
        atomic_dec(&ip->i_count);
}

void gfs2_inode_destroy(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;
        struct gfs2_glock *i_gl = ip->i_gl;

        gfs2_assert_warn(sdp, !atomic_read(&ip->i_count));
        gfs2_assert(sdp, get_gl2gl(io_gl) == i_gl);

        spin_lock(&io_gl->gl_spin);
        set_gl2gl(io_gl, NULL);
        gfs2_glock_put(i_gl);
        spin_unlock(&io_gl->gl_spin);

        gfs2_glock_dq_uninit(&ip->i_iopen_gh);

        gfs2_meta_cache_flush(ip);
        kmem_cache_free(gfs2_inode_cachep, ip);

        set_gl2ip(i_gl, NULL);
        gfs2_glock_put(i_gl);

        atomic_dec(&sdp->sd_inode_count);
}

static int dinode_dealloc(struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al;
        struct gfs2_rgrpd *rgd;
        int error;

        if (ip->i_di.di_blocks != 1) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(&ip->i_di);
                return -EIO;
        }

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out;

        error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
        if (error)
                goto out_qs;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
        if (!rgd) {
                gfs2_consist_inode(ip);
                error = -EIO;
                goto out_rindex_relse;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
                                   &al->al_rgd_gh);
        if (error)
                goto out_rindex_relse;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_UNLINKED +
                                 RES_STATFS + RES_QUOTA, 1);
        if (error)
                goto out_rg_gunlock;

        gfs2_trans_add_gl(ip->i_gl);

        gfs2_free_di(rgd, ip);

        error = gfs2_unlinked_ondisk_rm(sdp, ul);

        gfs2_trans_end(sdp);
        clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

 out_rg_gunlock:
        gfs2_glock_dq_uninit(&al->al_rgd_gh);

 out_rindex_relse:
        gfs2_glock_dq_uninit(&al->al_ri_gh);

 out_qs:
        gfs2_quota_unhold(ip);

 out:
        gfs2_alloc_put(ip);

        return error;
}

/**
 * inode_dealloc - Deallocate all on-disk blocks for an inode (dinode)
 * @sdp: the filesystem
 * @ul: the unlinked-list entry describing the inode to deallocate
 * @io_gh: a holder for the iopen glock for this inode
 *
 * Returns: errno
 */

static int inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul,
                         struct gfs2_holder *io_gh)
{
        struct gfs2_inode *ip;
        struct gfs2_holder i_gh;
        int error;

        error = gfs2_glock_nq_num(sdp,
                                  ul->ul_ut.ut_inum.no_addr, &gfs2_inode_glops,
                                  LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return error;

        /* We reacquire the iopen lock here to avoid a race with the NFS
           server calling gfs2_read_inode() with the inode number of an inode
           we're in the process of deallocating.  And we can't keep our hold
           on the lock from inode_dealloc_init() for deadlock reasons. */

        gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY, io_gh);
        error = gfs2_glock_nq(io_gh);
        switch (error) {
        case 0:
                break;
        case GLR_TRYFAILED:
                error = 1;
                /* fall through */
        default:
                goto out;
        }

        gfs2_assert_warn(sdp, !get_gl2ip(i_gh.gh_gl));
        error = inode_create(i_gh.gh_gl, &ul->ul_ut.ut_inum, io_gh->gh_gl,
                             LM_ST_EXCLUSIVE, &ip);

        gfs2_glock_dq(io_gh);

        if (error)
                goto out;

        error = gfs2_inode_refresh(ip);
        if (error)
                goto out_iput;

        if (ip->i_di.di_nlink) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(&ip->i_di);
                error = -EIO;
                goto out_iput;
        }

        if (S_ISDIR(ip->i_di.di_mode) &&
            (ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
                error = gfs2_dir_exhash_dealloc(ip);
                if (error)
                        goto out_iput;
        }

        if (ip->i_di.di_eattr) {
                error = gfs2_ea_dealloc(ip);
                if (error)
                        goto out_iput;
        }

        if (!gfs2_is_stuffed(ip)) {
                error = gfs2_file_dealloc(ip);
                if (error)
                        goto out_iput;
        }

        error = dinode_dealloc(ip, ul);
        if (error)
                goto out_iput;

 out_iput:
        gfs2_glmutex_lock(i_gh.gh_gl);
        gfs2_inode_put(ip);
        gfs2_inode_destroy(ip);
        gfs2_glmutex_unlock(i_gh.gh_gl);

 out:
        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * try_inode_dealloc - Try to deallocate an inode and all its blocks
 * @sdp: the filesystem
 * @ul: the unlinked-list entry describing the inode
 *
 * Returns: 0 on success, -errno on error, 1 on busy (inode open)
 */

static int try_inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
        struct gfs2_holder io_gh;
        int error = 0;

        gfs2_try_toss_inode(sdp, &ul->ul_ut.ut_inum);

        error = gfs2_glock_nq_num(sdp,
                                  ul->ul_ut.ut_inum.no_addr, &gfs2_iopen_glops,
                                  LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &io_gh);
        switch (error) {
        case 0:
                break;
        case GLR_TRYFAILED:
                return 1;
        default:
                return error;
        }

        gfs2_glock_dq(&io_gh);
        error = inode_dealloc(sdp, ul, &io_gh);
        gfs2_holder_uninit(&io_gh);

        return error;
}

static int inode_dealloc_uninit(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder ri_gh, rgd_gh;
        int error;

        error = gfs2_rindex_hold(sdp, &ri_gh);
        if (error)
                return error;

        rgd = gfs2_blk2rgrpd(sdp, ul->ul_ut.ut_inum.no_addr);
        if (!rgd) {
                gfs2_consist(sdp);
                error = -EIO;
                goto out;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rgd_gh);
        if (error)
                goto out;

        error = gfs2_trans_begin(sdp,
                                 RES_RG_BIT + RES_UNLINKED + RES_STATFS,
                                 0);
        if (error)
                goto out_gunlock;

        gfs2_free_uninit_di(rgd, ul->ul_ut.ut_inum.no_addr);
        gfs2_unlinked_ondisk_rm(sdp, ul);

        gfs2_trans_end(sdp);

 out_gunlock:
        gfs2_glock_dq_uninit(&rgd_gh);
 out:
        gfs2_glock_dq_uninit(&ri_gh);

        return error;
}

int gfs2_inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
        if (ul->ul_ut.ut_flags & GFS2_UTF_UNINIT)
                return inode_dealloc_uninit(sdp, ul);
        else
                return try_inode_dealloc(sdp, ul);
}
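
/*
 * Summary of the two deallocation paths above (comment only): an unlinked
 * entry flagged GFS2_UTF_UNINIT describes a dinode block that was allocated
 * by alloc_dinode() but never initialized by make_dinode(), so only the block
 * and the unlinked entry need freeing (inode_dealloc_uninit).  Every other
 * entry refers to a fully built but unlinked inode and goes through
 * try_inode_dealloc(), which also tears down directory hash tables, extended
 * attributes and file data.
 */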

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */

int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
        struct buffer_head *dibh;
        uint32_t nlink;
        int error;

        nlink = ip->i_di.di_nlink + diff;

        /* If we are reducing the nlink count, but the new value ends up being
           bigger than the old one, we must have underflowed. */
        if (diff < 0 && nlink > ip->i_di.di_nlink) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(&ip->i_di);
                return -EIO;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        ip->i_di.di_nlink = nlink;
        ip->i_di.di_ctime = get_seconds();

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(&ip->i_di, dibh->b_data);
        brelse(dibh);

        return 0;
}
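
/*
 * Calling convention note (an observation, not a behavioural change):
 * gfs2_change_nlink() adds the dinode buffer to a transaction with
 * gfs2_trans_add_bh() but never calls gfs2_trans_begin() itself, so its
 * callers (gfs2_unlinki() and gfs2_rmdiri() in this file) are expected to
 * run it inside an already-open transaction.
 */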

/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The VFS inode of the directory to search
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @inodep: Used to return the resulting VFS inode on success
 *
 * On success *inodep holds a referenced VFS inode for the entry that was
 * found; the caller is responsible for dropping that reference with iput().
 *
 * Returns: errno
 */

int gfs2_lookupi(struct inode *dir, struct qstr *name, int is_root,
                 struct inode **inodep)
{
        struct gfs2_inode *ipp;
        struct gfs2_inode *dip = get_v2ip(dir);
        struct gfs2_sbd *sdp = dip->i_sbd;
        struct gfs2_holder d_gh;
        struct gfs2_inum inum;
        unsigned int type;
        struct gfs2_glock *gl;
        int error = 0;

        *inodep = NULL;

        if (!name->len || name->len > GFS2_FNAMESIZE)
                return -ENAMETOOLONG;

        if (gfs2_filecmp(name, ".", 1) ||
            (gfs2_filecmp(name, "..", 2) && dir == sdp->sd_root_dir)) {
                gfs2_inode_hold(dip);
                ipp = dip;
                goto done;
        }

        error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
        if (error)
                return error;

        if (!is_root) {
                error = gfs2_repermission(dip->i_vnode, MAY_EXEC, NULL);
                if (error)
                        goto out;
        }

        error = gfs2_dir_search(dip, name, &inum, &type);
        if (error)
                goto out;

        error = gfs2_glock_get(sdp, inum.no_addr, &gfs2_inode_glops,
                               CREATE, &gl);
        if (error)
                goto out;

        error = gfs2_inode_get(gl, &inum, CREATE, &ipp);
        if (!error)
                gfs2_inode_min_init(ipp, type);

        gfs2_glock_put(gl);

out:
        gfs2_glock_dq_uninit(&d_gh);
done:
        if (error == 0) {
                *inodep = gfs2_ip2v(ipp);
                if (!*inodep)
                        error = -ENOMEM;
                gfs2_inode_put(ipp);
        }
        return error;
}
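
/*
 * Illustrative caller (a sketch, not copied verbatim from a caller): the ".."
 * walk in gfs2_ok_to_move() below follows this pattern:
 *
 *        struct inode *inode;
 *        int error = gfs2_lookupi(dir, &name, 1, &inode);
 *        if (!error) {
 *                ... use inode ...
 *                iput(inode);
 *        }
 */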

static int pick_formal_ino_1(struct gfs2_sbd *sdp, uint64_t *formal_ino)
{
        struct gfs2_inode *ip = get_v2ip(sdp->sd_ir_inode);
        struct buffer_head *bh;
        struct gfs2_inum_range ir;
        int error;

        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (error)
                return error;
        down(&sdp->sd_inum_mutex);

        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error) {
                up(&sdp->sd_inum_mutex);
                gfs2_trans_end(sdp);
                return error;
        }

        gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

        if (ir.ir_length) {
                *formal_ino = ir.ir_start++;
                ir.ir_length--;
                gfs2_trans_add_bh(ip->i_gl, bh, 1);
                gfs2_inum_range_out(&ir,
                                    bh->b_data + sizeof(struct gfs2_dinode));
                brelse(bh);
                up(&sdp->sd_inum_mutex);
                gfs2_trans_end(sdp);
                return 0;
        }

        brelse(bh);

        up(&sdp->sd_inum_mutex);
        gfs2_trans_end(sdp);

        return 1;
}

static int pick_formal_ino_2(struct gfs2_sbd *sdp, uint64_t *formal_ino)
{
        struct gfs2_inode *ip = get_v2ip(sdp->sd_ir_inode);
        struct gfs2_inode *m_ip = get_v2ip(sdp->sd_inum_inode);
        struct gfs2_holder gh;
        struct buffer_head *bh;
        struct gfs2_inum_range ir;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
        if (error)
                goto out;
        down(&sdp->sd_inum_mutex);

        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error)
                goto out_end_trans;

        gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

        if (!ir.ir_length) {
                struct buffer_head *m_bh;
                uint64_t x, y;

                error = gfs2_meta_inode_buffer(m_ip, &m_bh);
                if (error)
                        goto out_brelse;

                x = *(uint64_t *)(m_bh->b_data + sizeof(struct gfs2_dinode));
                x = y = be64_to_cpu(x);
                ir.ir_start = x;
                ir.ir_length = GFS2_INUM_QUANTUM;
                x += GFS2_INUM_QUANTUM;
                if (x < y)
                        gfs2_consist_inode(m_ip);
                x = cpu_to_be64(x);
                gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
                *(uint64_t *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = x;

                brelse(m_bh);
        }

        *formal_ino = ir.ir_start++;
        ir.ir_length--;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);
        gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

 out_brelse:
        brelse(bh);

 out_end_trans:
        up(&sdp->sd_inum_mutex);
        gfs2_trans_end(sdp);

 out:
        gfs2_glock_dq_uninit(&gh);

        return error;
}

static int pick_formal_ino(struct gfs2_sbd *sdp, uint64_t *inum)
{
        int error;

        error = pick_formal_ino_1(sdp, inum);
        if (error <= 0)
                return error;

        error = pick_formal_ino_2(sdp, inum);

        return error;
}
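
/*
 * How formal inode numbers are handed out (descriptive comment):
 * pick_formal_ino_1() simply takes the next number from the range cached in
 * the sd_ir_inode file, serialized by sd_inum_mutex, and returns 1 when that
 * range is exhausted.  Only then does pick_formal_ino_2() take the exclusive
 * glock on the shared sd_inum_inode counter, carve off another
 * GFS2_INUM_QUANTUM numbers into the cached range, and allocate from the
 * refilled range.
 */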

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode: the mode of the new dinode
 *
 * Returns: errno
 */

static int create_ok(struct gfs2_inode *dip, struct qstr *name,
                     unsigned int mode)
{
        int error;

        error = gfs2_repermission(dip->i_vnode, MAY_WRITE | MAY_EXEC, NULL);
        if (error)
                return error;

        /* Don't create entries in an unlinked directory */
        if (!dip->i_di.di_nlink)
                return -EPERM;

        error = gfs2_dir_search(dip, name, NULL, NULL);
        switch (error) {
        case -ENOENT:
                error = 0;
                break;
        case 0:
                return -EEXIST;
        default:
                return error;
        }

        if (dip->i_di.di_entries == (uint32_t)-1)
                return -EFBIG;
        if (S_ISDIR(mode) && dip->i_di.di_nlink == (uint32_t)-1)
                return -EMLINK;

        return 0;
}

static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
                               unsigned int *uid, unsigned int *gid)
{
        if (dip->i_sbd->sd_args.ar_suiddir &&
            (dip->i_di.di_mode & S_ISUID) &&
            dip->i_di.di_uid) {
                if (S_ISDIR(*mode))
                        *mode |= S_ISUID;
                else if (dip->i_di.di_uid != current->fsuid)
                        *mode &= ~07111;
                *uid = dip->i_di.di_uid;
        } else
                *uid = current->fsuid;

        if (dip->i_di.di_mode & S_ISGID) {
                if (S_ISDIR(*mode))
                        *mode |= S_ISGID;
                *gid = dip->i_di.di_gid;
        } else
                *gid = current->fsgid;
}

static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_unlinked *ul)
{
        struct gfs2_sbd *sdp = dip->i_sbd;
        int error;

        gfs2_alloc_get(dip);

        dip->i_alloc.al_requested = RES_DINODE;
        error = gfs2_inplace_reserve(dip);
        if (error)
                goto out;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_UNLINKED +
                                 RES_STATFS, 0);
        if (error)
                goto out_ipreserv;

        ul->ul_ut.ut_inum.no_addr = gfs2_alloc_di(dip);

        ul->ul_ut.ut_flags = GFS2_UTF_UNINIT;
        error = gfs2_unlinked_ondisk_add(sdp, ul);

        gfs2_trans_end(sdp);

 out_ipreserv:
        gfs2_inplace_release(dip);

 out:
        gfs2_alloc_put(dip);

        return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: the owning user
 * @gid: the owning group
 *
 */

static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
                        struct gfs2_inum *inum, unsigned int mode,
                        unsigned int uid, unsigned int gid)
{
        struct gfs2_sbd *sdp = dip->i_sbd;
        struct gfs2_dinode *di;
        struct buffer_head *dibh;

        dibh = gfs2_meta_new(gl, inum->no_addr);
        gfs2_trans_add_bh(gl, dibh, 1);
        gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
        gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
        di = (struct gfs2_dinode *)dibh->b_data;

        di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
        di->di_num.no_addr = cpu_to_be64(inum->no_addr);
        di->di_mode = cpu_to_be32(mode);
        di->di_uid = cpu_to_be32(uid);
        di->di_gid = cpu_to_be32(gid);
        di->di_nlink = cpu_to_be32(0);
        di->di_size = cpu_to_be64(0);
        di->di_blocks = cpu_to_be64(1);
        di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
        di->di_major = di->di_minor = cpu_to_be32(0);
        di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
        di->__pad[0] = di->__pad[1] = 0;
        di->di_flags = cpu_to_be32(0);

        if (S_ISREG(mode)) {
                if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
                    gfs2_tune_get(sdp, gt_new_files_jdata))
                        di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
                if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
                    gfs2_tune_get(sdp, gt_new_files_directio))
                        di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
        } else if (S_ISDIR(mode)) {
                di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
                                            GFS2_DIF_INHERIT_DIRECTIO);
                di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
                                            GFS2_DIF_INHERIT_JDATA);
        }

        di->__pad1 = 0;
        di->di_height = cpu_to_be32(0);
        di->__pad2 = 0;
        di->__pad3 = 0;
        di->di_depth = cpu_to_be16(0);
        di->di_entries = cpu_to_be32(0);
        memset(&di->__pad4, 0, sizeof(di->__pad4));
        di->di_eattr = cpu_to_be64(0);
        memset(&di->di_reserved, 0, sizeof(di->di_reserved));

        brelse(dibh);
}
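
/*
 * On-disk byte order (note added for clarity): every multi-byte field written
 * by init_dinode() goes through cpu_to_be16/32/64, i.e. the dinode is stored
 * big-endian regardless of the host CPU, and readers convert back again, as
 * with gfs2_dinode_in() and be64_to_cpu() elsewhere in this file.
 */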

static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
                       unsigned int mode, struct gfs2_unlinked *ul)
{
        struct gfs2_sbd *sdp = dip->i_sbd;
        unsigned int uid, gid;
        int error;

        munge_mode_uid_gid(dip, &mode, &uid, &gid);

        gfs2_alloc_get(dip);

        error = gfs2_quota_lock(dip, uid, gid);
        if (error)
                goto out;

        error = gfs2_quota_check(dip, uid, gid);
        if (error)
                goto out_quota;

        error = gfs2_trans_begin(sdp, RES_DINODE + RES_UNLINKED +
                                 RES_QUOTA, 0);
        if (error)
                goto out_quota;

        ul->ul_ut.ut_flags = 0;
        error = gfs2_unlinked_ondisk_munge(sdp, ul);

        init_dinode(dip, gl, &ul->ul_ut.ut_inum,
                    mode, uid, gid);

        gfs2_quota_change(dip, +1, uid, gid);

        gfs2_trans_end(sdp);

 out_quota:
        gfs2_quota_unlock(dip);

 out:
        gfs2_alloc_put(dip);

        return error;
}

static int link_dinode(struct gfs2_inode *dip, struct qstr *name,
                       struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
        struct gfs2_sbd *sdp = dip->i_sbd;
        struct gfs2_alloc *al;
        int alloc_required;
        struct buffer_head *dibh;
        int error;

        al = gfs2_alloc_get(dip);

        error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto fail;

        error = gfs2_diradd_alloc_required(dip, name, &alloc_required);
        if (error)
                goto fail_quota_locks;

        if (alloc_required) {
                error = gfs2_quota_check(dip, dip->i_di.di_uid,
                                         dip->i_di.di_gid);
                if (error)
                        goto fail_quota_locks;

                al->al_requested = sdp->sd_max_dirres;

                error = gfs2_inplace_reserve(dip);
                if (error)
                        goto fail_quota_locks;

                error = gfs2_trans_begin(sdp,
                                         sdp->sd_max_dirres +
                                         al->al_rgd->rd_ri.ri_length +
                                         2 * RES_DINODE + RES_UNLINKED +
                                         RES_STATFS + RES_QUOTA, 0);
                if (error)
                        goto fail_ipreserv;
        } else {
                error = gfs2_trans_begin(sdp,
                                         RES_LEAF +
                                         2 * RES_DINODE +
                                         RES_UNLINKED, 0);
                if (error)
                        goto fail_quota_locks;
        }

        error = gfs2_dir_add(dip, name, &ip->i_num, IF2DT(ip->i_di.di_mode));
        if (error)
                goto fail_end_trans;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto fail_end_trans;
        ip->i_di.di_nlink = 1;
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(&ip->i_di, dibh->b_data);
        brelse(dibh);

        error = gfs2_unlinked_ondisk_rm(sdp, ul);
        if (error)
                goto fail_end_trans;

        return 0;

 fail_end_trans:
        gfs2_trans_end(sdp);

 fail_ipreserv:
        if (dip->i_alloc.al_rgd)
                gfs2_inplace_release(dip);

 fail_quota_locks:
        gfs2_quota_unlock(dip);

 fail:
        gfs2_alloc_put(dip);

        return error;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * On success the glocks on both the directory and the new file are held,
 * a transaction has been started and an inplace reservation is held,
 * as well.
 *
 * Returns: a new VFS inode on success, or an ERR_PTR(errno) on failure
 */

struct inode *gfs2_createi(struct gfs2_holder *ghs, struct qstr *name,
                           unsigned int mode)
{
        struct inode *inode;
        struct gfs2_inode *dip = get_gl2ip(ghs->gh_gl);
        struct gfs2_sbd *sdp = dip->i_sbd;
        struct gfs2_unlinked *ul;
        struct gfs2_inode *ip;
        int error;

        if (!name->len || name->len > GFS2_FNAMESIZE)
                return ERR_PTR(-ENAMETOOLONG);

        error = gfs2_unlinked_get(sdp, &ul);
        if (error)
                return ERR_PTR(error);

        gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
        error = gfs2_glock_nq(ghs);
        if (error)
                goto fail;

        error = create_ok(dip, name, mode);
        if (error)
                goto fail_gunlock;

        error = pick_formal_ino(sdp, &ul->ul_ut.ut_inum.no_formal_ino);
        if (error)
                goto fail_gunlock;

        error = alloc_dinode(dip, ul);
        if (error)
                goto fail_gunlock;

        if (ul->ul_ut.ut_inum.no_addr < dip->i_num.no_addr) {
                gfs2_glock_dq(ghs);

                error = gfs2_glock_nq_num(sdp,
                                          ul->ul_ut.ut_inum.no_addr,
                                          &gfs2_inode_glops,
                                          LM_ST_EXCLUSIVE, GL_SKIP,
                                          ghs + 1);
                if (error) {
                        gfs2_unlinked_put(sdp, ul);
                        return ERR_PTR(error);
                }

                gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
                error = gfs2_glock_nq(ghs);
                if (error) {
                        gfs2_glock_dq_uninit(ghs + 1);
                        gfs2_unlinked_put(sdp, ul);
                        return ERR_PTR(error);
                }

                error = create_ok(dip, name, mode);
                if (error)
                        goto fail_gunlock2;
        } else {
                error = gfs2_glock_nq_num(sdp,
                                          ul->ul_ut.ut_inum.no_addr,
                                          &gfs2_inode_glops,
                                          LM_ST_EXCLUSIVE, GL_SKIP,
                                          ghs + 1);
                if (error)
                        goto fail_gunlock;
        }

        error = make_dinode(dip, ghs[1].gh_gl, mode, ul);
        if (error)
                goto fail_gunlock2;

        error = gfs2_inode_get(ghs[1].gh_gl, &ul->ul_ut.ut_inum, CREATE, &ip);
        if (error)
                goto fail_gunlock2;

        error = gfs2_inode_refresh(ip);
        if (error)
                goto fail_iput;

        error = gfs2_acl_create(dip, ip);
        if (error)
                goto fail_iput;

        error = link_dinode(dip, name, ip, ul);
        if (error)
                goto fail_iput;

        gfs2_unlinked_put(sdp, ul);

        inode = gfs2_ip2v(ip);
        gfs2_inode_put(ip);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        return inode;

 fail_iput:
        gfs2_inode_put(ip);

 fail_gunlock2:
        gfs2_glock_dq_uninit(ghs + 1);

 fail_gunlock:
        gfs2_glock_dq(ghs);

 fail:
        gfs2_unlinked_put(sdp, ul);

        return ERR_PTR(error);
}
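
/*
 * Two notes on gfs2_createi() (explanatory comments only):
 *
 * - The no_addr comparison above keeps glock acquisition ordered: when the
 *   new dinode's block address sorts below the directory's, the directory
 *   glock is dropped and both locks are retaken in ascending order, and
 *   create_ok() is repeated because the directory may have changed while it
 *   was unlocked.
 *
 * - A minimal caller sketch (illustrative, not a verbatim caller):
 *
 *        inode = gfs2_createi(ghs, &name, mode);
 *        if (IS_ERR(inode))
 *                return PTR_ERR(inode);
 *        ... finish creating the file, then end the transaction, release
 *            the reservation and drop both glocks as described above ...
 */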

/**
 * gfs2_unlinki - Unlink a file
 * @dip: The inode of the directory
 * @name: The name of the file to be unlinked
 * @ip: The inode of the file to be removed
 * @ul: the unlinked-list entry to fill in if the link count reaches zero
 *
 * Assumes Glocks on both dip and ip are held.
 *
 * Returns: errno
 */

int gfs2_unlinki(struct gfs2_inode *dip, struct qstr *name,
                 struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
        struct gfs2_sbd *sdp = dip->i_sbd;
        int error;

        error = gfs2_dir_del(dip, name);
        if (error)
                return error;

        error = gfs2_change_nlink(ip, -1);
        if (error)
                return error;

        /* If this inode is being unlinked from the directory structure,
           we need to mark that in the log so that it isn't lost during
           a crash. */

        if (!ip->i_di.di_nlink) {
                ul->ul_ut.ut_inum = ip->i_num;
                error = gfs2_unlinked_ondisk_add(sdp, ul);
                if (!error)
                        set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
        }

        return error;
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 * @ul: the unlinked-list entry for the directory being removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */

int gfs2_rmdiri(struct gfs2_inode *dip, struct qstr *name,
                struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
        struct gfs2_sbd *sdp = dip->i_sbd;
        struct qstr dotname;
        int error;

        if (ip->i_di.di_entries != 2) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(&ip->i_di);
                return -EIO;
        }

        error = gfs2_dir_del(dip, name);
        if (error)
                return error;

        error = gfs2_change_nlink(dip, -1);
        if (error)
                return error;

        dotname.len = 1;
        dotname.name = ".";
        error = gfs2_dir_del(ip, &dotname);
        if (error)
                return error;

        dotname.len = 2;
        dotname.name = "..";
        error = gfs2_dir_del(ip, &dotname);
        if (error)
                return error;

        error = gfs2_change_nlink(ip, -2);
        if (error)
                return error;

        /* This inode is being unlinked from the directory structure and
           we need to mark that in the log so that it isn't lost during
           a crash. */

        ul->ul_ut.ut_inum = ip->i_num;
        error = gfs2_unlinked_ondisk_add(sdp, ul);
        if (!error)
                set_bit(GLF_STICKY, &ip->i_gl->gl_flags);

        return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */

int gfs2_unlink_ok(struct gfs2_inode *dip, struct qstr *name,
                   struct gfs2_inode *ip)
{
        struct gfs2_inum inum;
        unsigned int type;
        int error;

        if (IS_IMMUTABLE(ip->i_vnode) || IS_APPEND(ip->i_vnode))
                return -EPERM;

        if ((dip->i_di.di_mode & S_ISVTX) &&
            dip->i_di.di_uid != current->fsuid &&
            ip->i_di.di_uid != current->fsuid &&
            !capable(CAP_FOWNER))
                return -EPERM;

        if (IS_APPEND(dip->i_vnode))
                return -EPERM;

        error = gfs2_repermission(dip->i_vnode, MAY_WRITE | MAY_EXEC, NULL);
        if (error)
                return error;

        error = gfs2_dir_search(dip, name, &inum, &type);
        if (error)
                return error;

        if (!gfs2_inum_equal(&inum, &ip->i_num))
                return -ENOENT;

        if (IF2DT(ip->i_di.di_mode) != type) {
                gfs2_consist_inode(dip);
                return -EIO;
        }

        return 0;
}

/*
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */

int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
        struct gfs2_sbd *sdp = this->i_sbd;
        struct inode *dir = to->i_vnode;
        struct inode *tmp;
        struct qstr dotdot;
        int error = 0;

        memset(&dotdot, 0, sizeof(struct qstr));
        dotdot.name = "..";
        dotdot.len = 2;

        igrab(dir);

        for (;;) {
                if (dir == this->i_vnode) {
                        error = -EINVAL;
                        break;
                }
                if (dir == sdp->sd_root_dir) {
                        error = 0;
                        break;
                }

                error = gfs2_lookupi(dir, &dotdot, 1, &tmp);
                if (error)
                        break;

                iput(dir);
                dir = tmp;
        }

        iput(dir);

        return error;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */

int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
        struct gfs2_holder i_gh;
        struct buffer_head *dibh;
        unsigned int x;
        int error;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
        error = gfs2_glock_nq_atime(&i_gh);
        if (error) {
                gfs2_holder_uninit(&i_gh);
                return error;
        }

        if (!ip->i_di.di_size) {
                gfs2_consist_inode(ip);
                error = -EIO;
                goto out;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto out;

        x = ip->i_di.di_size + 1;
        if (x > *len) {
                *buf = kmalloc(x, GFP_KERNEL);
                if (!*buf) {
                        error = -ENOMEM;
                        goto out_brelse;
                }
        }

        memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
        *len = x;

 out_brelse:
        brelse(dibh);

 out:
        gfs2_glock_dq_uninit(&i_gh);

        return error;
}
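
/*
 * Caller-side sketch for gfs2_readlinki() (illustrative only, assuming the
 * caller starts with a small stack buffer):
 *
 *        char array[64], *buf = array;
 *        unsigned int len = sizeof(array);
 *        int error = gfs2_readlinki(ip, &buf, &len);
 *        if (!error) {
 *                ... use the len bytes at buf ...
 *                if (buf != array)
 *                        kfree(buf);
 *        }
 */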

/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *      conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
 * Update if the difference between the current time and the inode's current
 * atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */

int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = get_gl2ip(gl);
        int64_t curtime, quantum = gfs2_tune_get(sdp, gt_atime_quantum);
        unsigned int state;
        int flags;
        int error;

        if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
            gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
            gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
                return -EINVAL;

        state = gh->gh_state;
        flags = gh->gh_flags;

        error = gfs2_glock_nq(gh);
        if (error)
                return error;

        if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
            (sdp->sd_vfs->s_flags & MS_RDONLY))
                return 0;

        curtime = get_seconds();
        if (curtime - ip->i_di.di_atime >= quantum) {
                gfs2_glock_dq(gh);
                gfs2_holder_reinit(LM_ST_EXCLUSIVE,
                                   gh->gh_flags & ~LM_FLAG_ANY,
                                   gh);
                error = gfs2_glock_nq(gh);
                if (error)
                        return error;

                /* Verify that atime hasn't been updated while we were
                   trying to get exclusive lock. */

                curtime = get_seconds();
                if (curtime - ip->i_di.di_atime >= quantum) {
                        struct buffer_head *dibh;

                        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
                        if (error == -EROFS)
                                return 0;
                        if (error)
                                goto fail;

                        error = gfs2_meta_inode_buffer(ip, &dibh);
                        if (error)
                                goto fail_end_trans;

                        ip->i_di.di_atime = curtime;

                        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                        gfs2_dinode_out(&ip->i_di, dibh->b_data);
                        brelse(dibh);

                        gfs2_trans_end(sdp);
                }

                /* If someone else has asked for the glock,
                   unlock and let them have it.  Then reacquire
                   in the original state. */
                if (gfs2_glock_is_blocking(gl)) {
                        gfs2_glock_dq(gh);
                        gfs2_holder_reinit(state, flags, gh);
                        return gfs2_glock_nq(gh);
                }
        }

        return 0;

 fail_end_trans:
        gfs2_trans_end(sdp);

 fail:
        gfs2_glock_dq(gh);

        return error;
}
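
/*
 * Why the two-step lock dance above (explanatory comment): the holder is
 * first acquired in the state the caller asked for; only if the cached atime
 * is older than gt_atime_quantum seconds is the lock dropped and retaken in
 * LM_ST_EXCLUSIVE so the dinode can be written.  The age test is repeated
 * after the upgrade because atime may already have been refreshed while this
 * holder was waiting for the exclusive lock.
 */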

/**
 * glock_compare_atime - Compare two struct gfs2_glock structures for sort
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B
 *         -1 if A < B
 *          0 if A = B
 */

static int glock_compare_atime(const void *arg_a, const void *arg_b)
{
        struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
        struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
        struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        struct lm_lockname *b = &gh_b->gh_gl->gl_name;
        int ret = 0;

        if (a->ln_number > b->ln_number)
                ret = 1;
        else if (a->ln_number < b->ln_number)
                ret = -1;
        else {
                if (gh_a->gh_state == LM_ST_SHARED &&
                    gh_b->gh_state == LM_ST_EXCLUSIVE)
                        ret = 1;
                else if (gh_a->gh_state == LM_ST_SHARED &&
                         (gh_b->gh_flags & GL_ATIME))
                        ret = 1;
        }

        return ret;
}

/**
 * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
 *      atime update
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder **p;
        unsigned int x;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                if (ghs->gh_flags & GL_ATIME)
                        error = gfs2_glock_nq_atime(ghs);
                else
                        error = gfs2_glock_nq(ghs);
                return error;
        }

        p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime,
             NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                if (p[x]->gh_flags & GL_ATIME)
                        error = gfs2_glock_nq_atime(p[x]);
                else
                        error = gfs2_glock_nq(p[x]);

                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        kfree(p);

        return error;
}

/**
 * gfs2_try_toss_vnode - See if we can toss a vnode from memory
 * @ip: the inode
 *
 * Prunes the dentries and, if possible, drops the VFS inode attached to @ip.
 */

void gfs2_try_toss_vnode(struct gfs2_inode *ip)
{
        struct inode *inode;

        inode = gfs2_ip2v_lookup(ip);
        if (!inode)
                return;

        d_prune_aliases(inode);

        if (S_ISDIR(ip->i_di.di_mode)) {
                struct list_head *head = &inode->i_dentry;
                struct dentry *d = NULL;

                spin_lock(&dcache_lock);
                if (list_empty(head))
                        spin_unlock(&dcache_lock);
                else {
                        d = list_entry(head->next, struct dentry, d_alias);
                        dget_locked(d);
                        spin_unlock(&dcache_lock);

                        if (have_submounts(d))
                                dput(d);
                        else {
                                shrink_dcache_parent(d);
                                dput(d);
                                d_prune_aliases(inode);
                        }
                }
        }

        inode->i_nlink = 0;
        iput(inode);
}

static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                error = inode_setattr(ip->i_vnode, attr);
                gfs2_assert_warn(ip->i_sbd, !error);
                gfs2_inode_attr_out(ip);

                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }
        return error;
}

/**
 * gfs2_setattr_simple - change attributes on an inode
 * @ip: the inode
 * @attr: the new attributes
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */

int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
        int error;

        if (get_transaction)
                return __gfs2_setattr_simple(ip, attr);

        error = gfs2_trans_begin(ip->i_sbd, RES_DINODE, 0);
        if (error)
                return error;

        error = __gfs2_setattr_simple(ip, attr);

        gfs2_trans_end(ip->i_sbd);

        return error;
}

int gfs2_repermission(struct inode *inode, int mask, struct nameidata *nd)
{
        return permission(inode, mask, nd);
}