// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/*
 * Set us up to scrub reverse mapping btrees.
 */
int
xfs_scrub_setup_ag_rmapbt(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	return xfs_scrub_setup_ag_btree(sc, ip, false);
}

/* Reverse-mapping scrubber. */

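/*
 * Only the data fork extents of regular files can be shared via reflink;
 * rmaps for bmbt blocks, attr fork extents, unwritten extents, and extents
 * belonging to non-inode (metadata) owners must never overlap a shared
 * extent in the refcount btree.  That is the rule the cross-check below
 * enforces.
 */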
/* Cross-reference an rmap against the refcount btree. */
STATIC void
xfs_scrub_rmapbt_xref_refc(
	struct xfs_scrub_context	*sc,
	struct xfs_rmap_irec		*irec)
{
	xfs_agblock_t			fbno;
	xfs_extlen_t			flen;
	bool				non_inode;
	bool				is_bmbt;
	bool				is_attr;
	bool				is_unwritten;
	int				error;

	if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
		return;

	non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
	is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;

	/* If this is shared, it must be a data fork extent. */
	error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
			irec->rm_blockcount, &fbno, &flen, false);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_rmapbt_xref(
	struct xfs_scrub_context	*sc,
	struct xfs_rmap_irec		*irec)
{
	xfs_agblock_t			agbno = irec->rm_startblock;
	xfs_extlen_t			len = irec->rm_blockcount;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_INODES)
		xfs_scrub_xref_is_inode_chunk(sc, agbno, len);
	else
		xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xfs_scrub_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xfs_scrub_rmapbt_xref_refc(sc, irec);
}

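/*
 * Note on the dispatch above: extents owned by XFS_RMAP_OWN_COW are
 * copy-on-write staging extents, which the refcount btree tracks with
 * special records, so they get the dedicated CoW staging cross-check
 * instead of the generic shared-extent check.
 */
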
/* Scrub an rmapbt record. */
STATIC int
xfs_scrub_rmapbt_rec(
	struct xfs_scrub_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_rmap_irec		irec;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	bool				non_inode;
	bool				is_unwritten;
	bool				is_bmbt;
	bool				is_attr;
	int				error;

	error = xfs_rmap_btrec_to_irec(rec, &irec);
	if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, 0, &error))
		goto out;

	/* Check extent. */
	if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (irec.rm_owner == XFS_RMAP_OWN_FS) {
		/*
		 * xfs_verify_agbno returns false for static fs metadata.
		 * Since that only exists at the start of the AG, validate
		 * that by hand.
		 */
		if (irec.rm_startblock != 0 ||
		    irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
			xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/*
		 * Otherwise we must point somewhere past the static metadata
		 * but before the end of the FS.  Run the regular check.
		 */
		if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) ||
		    !xfs_verify_agbno(mp, agno, irec.rm_startblock +
				irec.rm_blockcount - 1))
			xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	/* Check flags. */
	non_inode = XFS_RMAP_NON_INODE_OWNER(irec.rm_owner);
	is_bmbt = irec.rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec.rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN;

	if (is_bmbt && irec.rm_offset != 0)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && irec.rm_offset != 0)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (is_unwritten && (is_bmbt || non_inode || is_attr))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && (is_bmbt || is_unwritten || is_attr))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (!non_inode) {
		if (!xfs_verify_ino(mp, irec.rm_owner))
			xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/* Non-inode owner within the magic values? */
		if (irec.rm_owner <= XFS_RMAP_OWN_MIN ||
		    irec.rm_owner > XFS_RMAP_OWN_FS)
			xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	xfs_scrub_rmapbt_xref(bs->sc, &irec);
out:
	return error;
}

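/*
 * For illustration (not part of the original source): a well-formed record
 * for a single block of AG btree metadata would pass every check above;
 * something along the lines of
 *
 *	irec.rm_startblock = <agbno beyond the static AG headers>;
 *	irec.rm_blockcount = 1;
 *	irec.rm_owner = XFS_RMAP_OWN_AG;
 *	irec.rm_offset = 0;
 *	irec.rm_flags = 0;
 *
 * since XFS_RMAP_OWN_AG is a non-inode owner inside the special owner range
 * and carries a zero offset with no bmbt/attr/unwritten flags.
 */
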
/* Scrub the rmap btree for some AG. */
int
xfs_scrub_rmapbt(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
	return xfs_scrub_btree(sc, sc->sa.rmap_cur, xfs_scrub_rmapbt_rec,
			&oinfo, NULL);
}

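/*
 * A rough sketch (assumed; the real dispatch table lives in scrub/scrub.c)
 * of how the scrub core might wire up the entry points above for the
 * rmapbt scrub type:
 *
 *	[XFS_SCRUB_TYPE_RMAPBT] = {
 *		.setup	= xfs_scrub_setup_ag_rmapbt,
 *		.scrub	= xfs_scrub_rmapbt,
 *	},
 */
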
/* xref check whether the extent has the expected rmap for a given owner */
static inline void
xfs_scrub_xref_check_owner(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	struct xfs_owner_info		*oinfo,
	bool				should_have_rmap)
{
	bool				has_rmap;
	int				error;

	if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
		return;

	error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
			&has_rmap);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap != should_have_rmap)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent is owned by a given owner */
void
xfs_scrub_xref_is_owned_by(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	struct xfs_owner_info		*oinfo)
{
	xfs_scrub_xref_check_owner(sc, bno, len, oinfo, true);
}

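/*
 * Illustrative caller (hypothetical; the real callers live in the other
 * scrub files): a scrubber that has just verified an extent of inode
 * chunks could confirm the rmapbt agrees about its owner:
 *
 *	struct xfs_owner_info	oinfo;
 *
 *	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
 *	xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo);
 */
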
/* xref check that the extent is not owned by a given owner */
void
xfs_scrub_xref_is_not_owned_by(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	struct xfs_owner_info		*oinfo)
{
	xfs_scrub_xref_check_owner(sc, bno, len, oinfo, false);
}

/* xref check that the extent has no reverse mapping at all */
void
xfs_scrub_xref_has_no_owner(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len)
{
	bool				has_rmap;
	int				error;

	if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
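
/*
 * Illustrative caller (hypothetical; actual callers live elsewhere in
 * scrub/): a free space scrubber that believes an extent is free could
 * assert that no reverse mapping covers it at all:
 *
 *	xfs_scrub_xref_has_no_owner(sc, agbno, len);
 */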