// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* Set us up with an inode's bmap. */
int
xfs_scrub_setup_inode_bmap(
        struct xfs_scrub_context        *sc,
        struct xfs_inode                *ip)
{
        int                             error;

        error = xfs_scrub_get_inode(sc, ip);
        if (error)
                goto out;

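        /*
         * Take the IOLOCK and MMAPLOCK exclusively so that no new file I/O
         * or page faults can change the fork mappings while we scrub them.
         */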
        sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
        xfs_ilock(sc->ip, sc->ilock_flags);

        /*
         * We don't want any ephemeral data fork updates sitting around
         * while we inspect block mappings, so wait for directio to finish
         * and flush dirty data if we have delalloc reservations.
         */
        if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
            sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
                inode_dio_wait(VFS_I(sc->ip));
                error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
                if (error)
                        goto out;
        }

        /* Got the inode, lock it and we're ready to go. */
        error = xfs_scrub_trans_alloc(sc, 0);
        if (error)
                goto out;
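        /* Now that the empty transaction is allocated, take the ILOCK too. */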
        sc->ilock_flags |= XFS_ILOCK_EXCL;
        xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
        /* scrub teardown will unlock and release the inode */
        return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

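/*
 * Scrub state shared by the extent checkers: the scrub context, the logical
 * offset of the last extent we examined (for ordering checks), and whether
 * this fork lives on the realtime device or could contain shared extents.
 */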
struct xfs_scrub_bmap_info {
        struct xfs_scrub_context        *sc;
        xfs_fileoff_t                   lastoff;
        bool                            is_rt;
        bool                            is_shared;
        int                             whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xfs_scrub_bmap_get_rmap(
        struct xfs_scrub_bmap_info      *info,
        struct xfs_bmbt_irec            *irec,
        xfs_agblock_t                   agbno,
        uint64_t                        owner,
        struct xfs_rmap_irec            *rmap)
{
        xfs_fileoff_t                   offset;
        unsigned int                    rflags = 0;
        int                             has_rmap;
        int                             error;

        if (info->whichfork == XFS_ATTR_FORK)
                rflags |= XFS_RMAP_ATTR_FORK;

        /*
         * CoW staging extents are owned (on disk) by the refcountbt, so
         * their rmaps do not have offsets.
         */
        if (info->whichfork == XFS_COW_FORK)
                offset = 0;
        else
                offset = irec->br_startoff;

        /*
         * If the caller thinks this could be a shared bmbt extent (IOWs,
         * any data fork extent of a reflink inode) then we have to use the
         * range rmap lookup to make sure we get the correct owner/offset.
         */
        if (info->is_shared) {
                error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
                                owner, offset, rflags, rmap, &has_rmap);
                if (!xfs_scrub_should_check_xref(info->sc, &error,
                                &info->sc->sa.rmap_cur))
                        return false;
                goto out;
        }

        /*
         * Otherwise, use the (faster) regular lookup.
         */
        error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
                        offset, rflags, &has_rmap);
        if (!xfs_scrub_should_check_xref(info->sc, &error,
                        &info->sc->sa.rmap_cur))
                return false;
        if (!has_rmap)
                goto out;

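        /*
         * The lookup above only positions the cursor; fetch the record so
         * the caller can check that it actually covers this extent.
         */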
        error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
        if (!xfs_scrub_should_check_xref(info->sc, &error,
                        &info->sc->sa.rmap_cur))
                return false;

out:
        if (!has_rmap)
                xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xfs_scrub_bmap_xref_rmap(
        struct xfs_scrub_bmap_info      *info,
        struct xfs_bmbt_irec            *irec,
        xfs_agblock_t                   agbno)
{
        struct xfs_rmap_irec            rmap;
        unsigned long long              rmap_end;
        uint64_t                        owner;

        if (!info->sc->sa.rmap_cur || xfs_scrub_skip_xref(info->sc->sm))
                return;

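        /*
         * CoW staging extents are accounted to XFS_RMAP_OWN_COW in the rmap
         * records; everything else is owned by the inode itself.
         */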
        if (info->whichfork == XFS_COW_FORK)
                owner = XFS_RMAP_OWN_COW;
        else
                owner = info->sc->ip->i_ino;

        /* Find the rmap record for this irec. */
        if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
                return;

        /* Check the rmap. */
        rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
        if (rmap.rm_startblock > agbno ||
            agbno + irec->br_blockcount > rmap_end)
                xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
         * Check the logical offsets if applicable. CoW staging extents
         * don't track logical offsets since the mappings only exist in
         * memory.
         */
        if (info->whichfork != XFS_COW_FORK) {
                rmap_end = (unsigned long long)rmap.rm_offset +
                                rmap.rm_blockcount;
                if (rmap.rm_offset > irec->br_startoff ||
                    irec->br_startoff + irec->br_blockcount > rmap_end)
                        xfs_scrub_fblock_xref_set_corrupt(info->sc,
                                        info->whichfork, irec->br_startoff);
        }

        if (rmap.rm_owner != owner)
                xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
         * Check for discrepancies between the unwritten flag in the irec and
         * the rmap. Note that the (in-memory) CoW fork distinguishes between
         * unwritten and written extents, but we don't track that in the rmap
         * records because the blocks are owned (on-disk) by the refcountbt,
         * which doesn't track unwritten state.
         */
        if (owner != XFS_RMAP_OWN_COW &&
            irec->br_state == XFS_EXT_UNWRITTEN &&
            !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
                xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        if (info->whichfork == XFS_ATTR_FORK &&
            !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
                xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
                xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xfs_scrub_bmap_rt_extent_xref(
        struct xfs_scrub_bmap_info      *info,
        struct xfs_inode                *ip,
        struct xfs_btree_cur            *cur,
        struct xfs_bmbt_irec            *irec)
{
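        /* If we've already found corruption, don't bother cross-referencing. */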
        if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
                        irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xfs_scrub_bmap_extent_xref(
        struct xfs_scrub_bmap_info      *info,
        struct xfs_inode                *ip,
        struct xfs_btree_cur            *cur,
        struct xfs_bmbt_irec            *irec)
{
        struct xfs_mount                *mp = info->sc->mp;
        xfs_agnumber_t                  agno;
        xfs_agblock_t                   agbno;
        xfs_extlen_t                    len;
        int                             error;

        if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
        agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
        len = irec->br_blockcount;

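        /*
         * Set up the AG headers and btree cursors for the AG that owns this
         * extent so the cross-reference helpers below have something to
         * query.
         */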
        error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
        if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
                        irec->br_startoff, &error))
                return;

        xfs_scrub_xref_is_used_space(info->sc, agbno, len);
        xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
        xfs_scrub_bmap_xref_rmap(info, irec, agbno);
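
        /*
         * Data fork extents of a reflink inode may legitimately be shared,
         * so only unshared data forks and attr forks are checked against the
         * refcount btree; CoW fork extents must be refcount staging extents.
         */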
        switch (info->whichfork) {
        case XFS_DATA_FORK:
                if (xfs_is_reflink_inode(info->sc->ip))
                        break;
                /* fall through */
        case XFS_ATTR_FORK:
                xfs_scrub_xref_is_not_shared(info->sc, agbno,
                                irec->br_blockcount);
                break;
        case XFS_COW_FORK:
                xfs_scrub_xref_is_cow_staging(info->sc, agbno,
                                irec->br_blockcount);
                break;
        }

        xfs_scrub_ag_free(info->sc, &info->sc->sa);
}

/* Scrub a single extent record. */
STATIC int
xfs_scrub_bmap_extent(
        struct xfs_inode                *ip,
        struct xfs_btree_cur            *cur,
        struct xfs_scrub_bmap_info      *info,
        struct xfs_bmbt_irec            *irec)
{
        struct xfs_mount                *mp = info->sc->mp;
        struct xfs_buf                  *bp = NULL;
        xfs_filblks_t                   end;
        int                             error = 0;

        if (cur)
                xfs_btree_get_block(cur, 0, &bp);

        /*
         * Check for out-of-order extents. This record could have come
         * from the incore list, for which there is no ordering check.
         */
        if (irec->br_startoff < info->lastoff)
                xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* There should never be a "hole" extent in either extent list. */
        if (irec->br_startblock == HOLESTARTBLOCK)
                xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /*
         * Check for delalloc extents. We never iterate the ones in the
         * in-core extent scan, and we should never see these in the bmbt.
         */
        if (isnullstartblock(irec->br_startblock))
                xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* Make sure the extent points to a valid place. */
        if (irec->br_blockcount > MAXEXTLEN)
                xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
                xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        end = irec->br_startblock + irec->br_blockcount - 1;
        if (info->is_rt &&
            (!xfs_verify_rtbno(mp, irec->br_startblock) ||
             !xfs_verify_rtbno(mp, end)))
                xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
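        /* On the data device, the extent must lie entirely within one AG. */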
        if (!info->is_rt &&
            (!xfs_verify_fsbno(mp, irec->br_startblock) ||
             !xfs_verify_fsbno(mp, end) ||
             XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
                        XFS_FSB_TO_AGNO(mp, end)))
                xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        /* We don't allow unwritten extents on attr forks. */
        if (irec->br_state == XFS_EXT_UNWRITTEN &&
            info->whichfork == XFS_ATTR_FORK)
                xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);

        if (info->is_rt)
                xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
        else
                xfs_scrub_bmap_extent_xref(info, ip, cur, irec);

        info->lastoff = irec->br_startoff + irec->br_blockcount;
        return error;
}

/* Scrub a bmbt record. */
STATIC int
xfs_scrub_bmapbt_rec(
        struct xfs_scrub_btree          *bs,
        union xfs_btree_rec             *rec)
{
        struct xfs_bmbt_irec            irec;
        struct xfs_scrub_bmap_info      *info = bs->private;
        struct xfs_inode                *ip = bs->cur->bc_private.b.ip;
        struct xfs_buf                  *bp = NULL;
        struct xfs_btree_block          *block;
        uint64_t                        owner;
        int                             i;

        /*
         * Check the owners of the btree blocks up to the level below
         * the root since the verifiers don't do that.
         */
        if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
            bs->cur->bc_ptrs[0] == 1) {
                for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
                        block = xfs_btree_get_block(bs->cur, i, &bp);
                        owner = be64_to_cpu(block->bb_u.l.bb_owner);
                        if (owner != ip->i_ino)
                                xfs_scrub_fblock_set_corrupt(bs->sc,
                                                info->whichfork, 0);
                }
        }

        /* Set up the in-core record and scrub it. */
        xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
        return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
}

/* Scan the btree records. */
STATIC int
xfs_scrub_bmap_btree(
        struct xfs_scrub_context        *sc,
        int                             whichfork,
        struct xfs_scrub_bmap_info      *info)
{
        struct xfs_owner_info           oinfo;
        struct xfs_mount                *mp = sc->mp;
        struct xfs_inode                *ip = sc->ip;
        struct xfs_btree_cur            *cur;
        int                             error;

        cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
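        /*
         * The bmbt blocks themselves are owned by the inode (with the bmbt
         * flag set), which is the owner info the generic btree scrubber
         * uses for its block checks.
         */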
        xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
        error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
        xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
                        XFS_BTREE_NOERROR);
        return error;
}

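/* State for checking that each rmap has a matching bmbt record. */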
struct xfs_scrub_bmap_check_rmap_info {
        struct xfs_scrub_context        *sc;
        int                             whichfork;
        struct xfs_iext_cursor          icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xfs_scrub_bmap_check_rmap(
        struct xfs_btree_cur            *cur,
        struct xfs_rmap_irec            *rec,
        void                            *priv)
{
        struct xfs_bmbt_irec            irec;
        struct xfs_scrub_bmap_check_rmap_info   *sbcri = priv;
        struct xfs_ifork                *ifp;
        struct xfs_scrub_context        *sc = sbcri->sc;
        bool                            have_map;

        /* Is this even the right fork? */
        if (rec->rm_owner != sc->ip->i_ino)
                return 0;
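        /* The rmap must describe the same fork (attr vs. data) as the caller. */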
        if ((sbcri->whichfork == XFS_ATTR_FORK) ^
            !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
                return 0;
        if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
                return 0;

        /* Now look up the bmbt record. */
        ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
        if (!ifp) {
                xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
                                rec->rm_offset);
                goto out;
        }
        have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
                        &sbcri->icur, &irec);
        if (!have_map)
                xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
                                rec->rm_offset);
        /*
         * bmap extent record lengths are constrained to 2^21 blocks in length
         * because of space constraints in the on-disk metadata structure.
         * However, rmap extent record lengths are constrained only by AG
         * length, so we have to loop through the bmbt to make sure that the
         * entire rmap is covered by bmbt records.
         */
        while (have_map) {
                if (irec.br_startoff != rec->rm_offset)
                        xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
                if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
                                cur->bc_private.a.agno, rec->rm_startblock))
                        xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
                if (irec.br_blockcount > rec->rm_blockcount)
                        xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        break;
                rec->rm_startblock += irec.br_blockcount;
                rec->rm_offset += irec.br_blockcount;
                rec->rm_blockcount -= irec.br_blockcount;
                if (rec->rm_blockcount == 0)
                        break;
                have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
                if (!have_map)
                        xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
                                        rec->rm_offset);
        }

out:
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return XFS_BTREE_QUERY_RANGE_ABORT;
        return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xfs_scrub_bmap_check_ag_rmaps(
        struct xfs_scrub_context        *sc,
        int                             whichfork,
        xfs_agnumber_t                  agno)
{
        struct xfs_scrub_bmap_check_rmap_info   sbcri;
        struct xfs_btree_cur            *cur;
        struct xfs_buf                  *agf;
        int                             error;

        error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
        if (error)
                return error;

        cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
        if (!cur) {
                error = -ENOMEM;
                goto out_agf;
        }

        sbcri.sc = sc;
        sbcri.whichfork = whichfork;
        error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri);
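        /*
         * A range query abort here just means the callback flagged
         * corruption and stopped early; it is not an error for this
         * function.
         */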
        if (error == XFS_BTREE_QUERY_RANGE_ABORT)
                error = 0;

        xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
out_agf:
        xfs_trans_brelse(sc->tp, agf);
        return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xfs_scrub_bmap_check_rmaps(
        struct xfs_scrub_context        *sc,
        int                             whichfork)
{
        loff_t                          size;
        xfs_agnumber_t                  agno;
        int                             error;

        if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
            whichfork == XFS_COW_FORK ||
            (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                return 0;

        /* Don't support realtime rmap checks yet. */
        if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
                return 0;

        /*
         * Only do this for complex maps that are in btree format, or for
         * situations where we would seem to have a size but zero extents.
         * The inode repair code can zap broken iforks, which means we have
         * to flag this bmap as corrupt if there are rmaps that need to be
         * reattached.
         */
        switch (whichfork) {
        case XFS_DATA_FORK:
                size = i_size_read(VFS_I(sc->ip));
                break;
        case XFS_ATTR_FORK:
                size = XFS_IFORK_Q(sc->ip);
                break;
        default:
                size = 0;
                break;
        }
        if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
            (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
                return 0;

        for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
                error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno);
                if (error)
                        return error;
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        break;
        }

        return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xfs_scrub_bmap(
        struct xfs_scrub_context        *sc,
        int                             whichfork)
{
        struct xfs_bmbt_irec            irec;
        struct xfs_scrub_bmap_info      info = { NULL };
        struct xfs_mount                *mp = sc->mp;
        struct xfs_inode                *ip = sc->ip;
        struct xfs_ifork                *ifp;
        xfs_fileoff_t                   endoff;
        struct xfs_iext_cursor          icur;
        int                             error = 0;

        ifp = XFS_IFORK_PTR(ip, whichfork);

        info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
        info.whichfork = whichfork;
        info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
        info.sc = sc;

        switch (whichfork) {
        case XFS_COW_FORK:
                /* Non-existent CoW forks are ignorable. */
                if (!ifp)
                        goto out;
                /* No CoW forks on non-reflink inodes/filesystems. */
                if (!xfs_is_reflink_inode(ip)) {
                        xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
                        goto out;
                }
                break;
        case XFS_ATTR_FORK:
                if (!ifp)
                        goto out_check_rmap;
                if (!xfs_sb_version_hasattr(&mp->m_sb) &&
                    !xfs_sb_version_hasattr2(&mp->m_sb))
                        xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
                break;
        default:
                ASSERT(whichfork == XFS_DATA_FORK);
                break;
        }

        /* Check the fork values */
        switch (XFS_IFORK_FORMAT(ip, whichfork)) {
        case XFS_DINODE_FMT_UUID:
        case XFS_DINODE_FMT_DEV:
        case XFS_DINODE_FMT_LOCAL:
                /* No mappings to check. */
                goto out;
        case XFS_DINODE_FMT_EXTENTS:
                if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                        xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
                        goto out;
                }
                break;
        case XFS_DINODE_FMT_BTREE:
                if (whichfork == XFS_COW_FORK) {
                        xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
                        goto out;
                }

                error = xfs_scrub_bmap_btree(sc, whichfork, &info);
                if (error)
                        goto out;
                break;
        default:
                xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
                goto out;
        }

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        /* Now try to scrub the in-memory extent list. */
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(sc->tp, ip, whichfork);
                if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
                        goto out;
        }

        /* Find the offset of the last extent in the mapping. */
        error = xfs_bmap_last_offset(ip, &endoff, whichfork);
        if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
                goto out;

        /* Scrub extent records. */
        info.lastoff = 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        for_each_xfs_iext(ifp, &icur, &irec) {
                if (xfs_scrub_should_terminate(sc, &error) ||
                    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
                        break;
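                /*
                 * Delalloc extents have no physical space allocated yet, so
                 * there is nothing to check against.
                 */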
                if (isnullstartblock(irec.br_startblock))
                        continue;
                if (irec.br_startoff >= endoff) {
                        xfs_scrub_fblock_set_corrupt(sc, whichfork,
                                        irec.br_startoff);
                        goto out;
                }
                error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
                if (error)
                        goto out;
        }

out_check_rmap:
        error = xfs_scrub_bmap_check_rmaps(sc, whichfork);
        if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error))
                goto out;
out:
        return error;
}

/* Scrub an inode's data fork. */
int
xfs_scrub_bmap_data(
        struct xfs_scrub_context        *sc)
{
        return xfs_scrub_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xfs_scrub_bmap_attr(
        struct xfs_scrub_context        *sc)
{
        return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xfs_scrub_bmap_cow(
        struct xfs_scrub_context        *sc)
{
        if (!xfs_is_reflink_inode(sc->ip))
                return -ENOENT;

        return xfs_scrub_bmap(sc, XFS_COW_FORK);
}