/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_ialloc.h"

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif
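
/*
 * Note: a zero di_next_unlinked is never a valid on-disk value; unlinked
 * lists are terminated with NULLAGINO, so a zero here almost always means
 * the buffer was not correctly initialised or replayed.
 */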

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we don't want to mark it with an error,
 * but we do want to clear the DONE status of the buffer so that a followup
 * read will re-read it from disk. This ensures that we don't get unnecessary
 * warnings during log recovery and we don't get unnecessary panics on debug
 * kernels.
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (struct xfs_dinode *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				return;
			}

			xfs_buf_ioerror(bp, EFSCORRUPTED);
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
					     mp, dip);
#ifdef DEBUG
			xfs_emerg(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
			ASSERT(0);
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}

static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
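
/*
 * How the verifiers above get used (illustrative): normal reads attach
 * xfs_inode_buf_ops, e.g. via xfs_trans_read_buf() in xfs_imap_to_bp()
 * below, so a bad buffer is failed with EFSCORRUPTED. Inode cluster
 * readahead is expected to pass xfs_inode_buf_ra_ops instead, so a buffer
 * that has not been replayed yet is simply requeued for a real read
 * rather than triggering corruption warnings or debug asserts.
 */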

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return XFS_ERROR(EINVAL);

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}
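
/*
 * Illustrative call pattern (xfs_iread() below is the canonical caller):
 *
 *	struct xfs_dinode	*dip;
 *	struct xfs_buf		*bp;
 *
 *	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
 *	if (error)
 *		return error;
 *	... read or log the inode through dip ...
 *	xfs_trans_brelse(tp, bp);
 */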

STATIC void
xfs_dinode_from_disk(
	xfs_icdinode_t	*to,
	xfs_dinode_t	*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);

	if (to->di_version == 3) {
		to->di_changecount = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_ino = be64_to_cpu(from->di_ino);
		to->di_lsn = be64_to_cpu(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
	}
}
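
/*
 * The inverse of xfs_dinode_from_disk(); keep the two in sync when on-disk
 * fields change. Note that v3 inodes zero di_flushiter on the way out:
 * once the inode is CRC protected the flush counter is no longer needed
 * for recovery ordering, as the LSN stamped into the inode core is used
 * instead.
 */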
void
xfs_dinode_to_disk(
	xfs_dinode_t	*to,
	xfs_icdinode_t	*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

static bool
xfs_dinode_verify(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return false;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return true;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      offsetof(struct xfs_dinode, di_crc)))
		return false;
	if (be64_to_cpu(dip->di_ino) != ip->i_ino)
		return false;
	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid))
		return false;
	return true;
}
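
/*
 * Note that v1/v2 inodes carry no CRC, self-describing inode number or
 * UUID, so the only check xfs_dinode_verify() can usefully make for them
 * is the magic number; most of the remaining validation for those inodes
 * happens later, e.g. when the forks are read in by xfs_iformat_fork().
 */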

void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	__uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      offsetof(struct xfs_dinode, di_crc));
	dip->di_crc = xfs_end_cksum(crc);
}
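
/*
 * xfs_dinode_calc_crc() and xfs_dinode_verify() must agree on the region
 * being checksummed: all sb_inodesize bytes of the on-disk inode, with
 * the di_crc field itself excluded via its offset.
 */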

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		ip->i_d.di_magic = XFS_DINODE_MAGIC;
		ip->i_d.di_gen = prandom_u32();
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			ip->i_d.di_version = 3;
			ip->i_d.di_ino = ip->i_ino;
			uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid);
		} else
			ip->i_d.di_version = 2;
		return 0;
	}
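
	/*
	 * If we get here we are either reading an existing inode or we could
	 * not take the shortcut above (v4 superblock or XFS_MOUNT_IKEEP), so
	 * read the cluster and take di_gen and di_flushiter from disk.
	 */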

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	if (!xfs_dinode_verify(mp, ip, dip)) {
		xfs_alert(mp, "%s: validation failed for inode %lld",
			__func__, ip->i_ino);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error) {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat_fork() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		if (dip->di_version == 3) {
			ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
			uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
		}

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
	}

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long. If this is an old format inode,
	 * convert it in memory to look like a new one. If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it. We zero out the new projid field and the old link
	 * count field. We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format. We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);
	}

	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while. This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
	 * brelse(). If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction. It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it. Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}