/*
 * linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * from
 *
 * linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

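/*
 * ufs_block_to_path() splits a logical block number into per-level indices
 * for the inode's block-pointer tree, mirroring ext2_block_to_path():
 * blocks below UFS_NDADDR are direct; the next s_apb blocks go through the
 * single-indirect pointer, the next s_apb^2 through the double-indirect one,
 * and the next s_apb^3 through the triple-indirect one.  For example, the
 * first block past the direct area yields depth 2 with
 * offsets[] = { UFS_IND_BLOCK, 0 }.
 */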
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

typedef struct {
	void *p;
	union {
		__fs32 key32;
		__fs64 key64;
	};
	struct buffer_head *bh;
} Indirect;

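/*
 * grow_chain32()/grow_chain64() extend the Indirect chain by one entry and,
 * on the read side of the ufsi->meta_lock seqlock, re-check that every
 * pointer already in the chain still matches what the inode/indirect block
 * holds.  A return of 0 means a concurrent truncate changed one of the
 * pointers and the caller must restart the lookup from the top.
 */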
static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

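/*
 * ufs_frag_map() walks the offsets[] path computed by ufs_block_to_path(),
 * reading one indirect block per level with sb_bread() and validating each
 * step with grow_chain32()/grow_chain64().  If a pointer changes under us
 * (concurrent truncate), the walk is restarted; a zero pointer or a failed
 * read maps to 0, i.e. a hole.
 */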
static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
	     uspi->s_fpbshift, uspi->s_apbmask,
	     (unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @fragment: number of the `fragment' that holds the pointer
 *	to the newly allocated fragment(s)
 * @new_fragment: number of the newly allocated fragment(s)
 * @required: how many fragment(s) we require
 * @err: set if something goes wrong
 * @phys: pointer to where we store the physical number of the newly
 *	allocated fragments; NULL if we are not allocating data
 *	(e.g. indirect blocks)
 * @new: set if we allocate a new block
 * @locked_page: for ufs_new_fragments()
 */
static struct buffer_head *
ufs_inode_getfrag(struct inode *inode, u64 fragment,
		  sector_t new_fragment, unsigned int required, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned blockoff, lastblockoff;
	u64 tmp, goal, lastfrag, block, lastblock;
	void *p, *p2;

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, required, !phys);

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = ufs_get_direct_data_ptr(uspi, ufsi, block);

	goal = 0;

	tmp = ufs_data_ptr_to_cpu(sb, p);

	lastfrag = ufsi->i_lastfrag;
	if (tmp && fragment < lastfrag)
		goto out;

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend file into new block beyond last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate last allocated block
		 */
		if (lastblockoff) {
			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
			tmp = ufs_new_fragments(inode, p2, lastfrag,
						ufs_data_ptr_to_cpu(sb, p2),
						uspi->s_fpb - lastblockoff,
						err, locked_page);
			if (!tmp)
				return NULL;
			lastfrag = ufsi->i_lastfrag;
		}
		tmp = ufs_data_ptr_to_cpu(sb,
					  ufs_get_direct_data_ptr(uspi, ufsi,
								  lastblock));
		if (tmp)
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
					 goal, required + blockoff,
					 err,
					 phys != NULL ? locked_page : NULL);
	} else if (lastblock == block) {
		/*
		 * We will extend last allocated block
		 */
		tmp = ufs_new_fragments(inode, p, fragment -
					(blockoff - lastblockoff),
					ufs_data_ptr_to_cpu(sb, p),
					required + (blockoff - lastblockoff),
					err, phys != NULL ? locked_page : NULL);
	} else /* (lastblock > block) */ {
		/*
		 * We will allocate new block before last allocated block
		 */
		if (block) {
			tmp = ufs_data_ptr_to_cpu(sb,
						  ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
			if (tmp)
				goal = tmp + uspi->s_fpb;
		}
		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
					goal, uspi->s_fpb, err,
					phys != NULL ? locked_page : NULL);
	}
	if (!tmp) {
		*err = -ENOSPC;
		return NULL;
	}

	if (phys) {
		*err = 0;
		*new = 1;
	}
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
out:
	tmp += uspi->s_sbbase + blockoff;
	if (!phys) {
		return sb_getblk(sb, tmp);
	} else {
		*phys = tmp;
		return NULL;
	}

	/* This part : To be implemented ....
	   Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	*/
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @bh: buffer head of the indirect block that holds the "pointer" to the
 *	newly allocated block
 * @fragment: number of the `fragment' that holds the pointer
 *	to the newly allocated block
 * @new_fragment: number of the newly allocated fragment
 *	(the block will hold this fragment and uspi->s_fpb-1 others)
 * @err: see ufs_inode_getfrag()
 * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static struct buffer_head *
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
		   u64 fragment, sector_t new_fragment, int *err,
		   long *phys, int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *result;
	unsigned blockoff;
	u64 tmp = 0, goal, block;
	void *p;

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, !phys);

	result = NULL;
	if (!bh)
		goto out;
	if (!buffer_uptodate(bh)) {
		ll_rw_block (READ, 1, &bh);
		wait_on_buffer (bh);
		if (!buffer_uptodate(bh))
			goto out;
	}
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + block;
	else
		p = (__fs32 *)bh->b_data + block;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (block && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	brelse (bh);
	if (tmp) {
		tmp += uspi->s_sbbase + blockoff;
		if (phys) {
			*phys = tmp;
		} else {
			result = sb_getblk(sb, tmp);
		}
	}
	UFSD("EXIT\n");
	return result;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */

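/*
 * For reads (!create) this only maps an already existing fragment via
 * ufs_frag_map(); holes stay unmapped.  For writes it walks (and, where
 * needed, allocates) the chain of indirect blocks under truncate_mutex,
 * using the GET_INODE_... and GET_INDIRECT_... macros defined below.
 */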
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_info *sbi = UFS_SB(sb);
	struct ufs_sb_private_info *uspi = sbi->s_uspi;
	struct buffer_head *bh;
	int ret, err, new;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	unsigned long ptr, phys;
	u64 phys64 = 0;

	if (!create) {
		phys64 = ufs_frag_map(inode, offsets, depth);
		if (phys64) {
			phys64 += fragment & uspi->s_fpbmask;
			map_bh(bh_result, sb, phys64);
		}
		return 0;
	}

	/* This code entered only while writing ....? */

	err = -EIO;
	new = 0;
	ret = 0;
	bh = NULL;

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (!depth)
		goto abort_too_big;

	err = 0;
	ptr = fragment;

	/*
	 * ok, these macros clean the logic up a bit and make
	 * it much more readable:
	 */
#define GET_INODE_DATABLOCK(x) \
	ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
			  bh_result->b_page)
#define GET_INODE_PTR(x) \
	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
			  bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			   &err, &phys, &new, bh_result->b_page)
#define GET_INDIRECT_PTR(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			   &err, NULL, NULL, NULL)

	if (depth == 1) {
		bh = GET_INODE_DATABLOCK(ptr);
		goto out;
	}
	ptr -= UFS_NDIR_FRAGMENT;
	if (depth == 2) {
		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
		goto get_indirect;
	}
	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
	if (depth == 3) {
		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
		goto get_double;
	}
	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	mutex_unlock(&UFS_I(inode)->truncate_mutex);

	return err;

abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ufs_getfrag_block, wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

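/*
 * A short or failed write may have instantiated blocks past i_size;
 * ufs_write_failed() trims both the page cache and those blocks back to
 * the current i_size.
 */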
static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_op = &ufs_fast_symlink_inode_operations;
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
		} else {
			inode->i_op = &ufs_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;
	struct inode *inode;
	int err;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	if (err)
		goto bad_inode;
	inode->i_version++;
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse(bh);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode *inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_evict_inode(struct inode *inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks)
			ufs_truncate_blocks(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

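/*
 * The truncate paths below batch block frees: free_data() merges runs of
 * physically adjacent blocks in a struct to_free and only calls
 * ufs_free_blocks() when a discontiguous run starts.  A final
 * free_data(&ctx, 0, 0) call flushes whatever is still pending.
 */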
struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}

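/*
 * The first block/fragment lying entirely beyond i_size, i.e. the point
 * from which truncation starts freeing.
 */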
#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

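/*
 * ufs_trunc_direct() frees the direct-block tail of the file in three parts:
 * the tail fragments of the last block that is partly kept (frag1 up to
 * frag2), the run of whole blocks after it (block1 up to block2), and the
 * fragments of the final, partially allocated block (frag3 up to frag4).
 */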
static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb;
	struct ufs_sb_private_info *uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}

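/*
 * free_full_branch() releases an indirect block whose pointer has already
 * been cleared in its parent: it recurses through the lower levels (or, at
 * the leaf level, batches the data-block frees through free_data()), forgets
 * the buffer and finally frees the indirect block itself.
 */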
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

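/*
 * free_branch_tail() trims an indirect block that stays allocated: entries
 * below @from are kept, every later pointer is cleared under meta_lock and
 * its subtree handed to free_full_branch() (or, at the leaf level, its data
 * blocks batched through free_data()).
 */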
static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

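/*
 * Before truncation changes i_size, ufs_alloc_lastblock() makes sure the
 * fragment that will become the last one is actually mapped (allocating it
 * if needed) and, when that fragment lies past the direct area, zeroes the
 * remaining fragments of its block on disk.
 */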
static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev,
					  bh->b_blocknr);
		/*
		 * We do not zero the fragment: if it was mapped to a hole,
		 * it already reads as zeroes.
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

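/*
 * __ufs_truncate_blocks() releases everything past DIRECT_BLOCK.  It first
 * handles the direct blocks (or, for deeper files, walks down the partially
 * kept indirect chain and trims each level with free_branch_tail()), then
 * drops every remaining top-level indirect tree with free_full_branch().
 * All of this runs under truncate_mutex; concurrent ufs_frag_map() lookups
 * notice cleared pointers via meta_lock and retry.
 */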
static void __ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (!depth)
		return;

	/* find the last non-zero in offsets[] */
	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2])
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
		for (i = 0; i < depth2; i++) {
			offsets[i]++; /* next branch is fully freed */
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	__ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

static void ufs_truncate_blocks(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
	__ufs_truncate_blocks(inode);
}

int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};