From c517b3aa02cff1dd688aa783b748e06c8aee1285 Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Thu, 19 Jul 2018 12:29:11 -0700
Subject: xfs: shorten xfs_scrub_ prefix

Shorten all the metadata checking xfs_scrub_ prefixes to xchk_. After
this, the only xfs_scrub* symbols are the ones that pertain to both
scrub and repair. Whitespace damage will be fixed in a subsequent
patch. There are no functional changes.

Signed-off-by: Darrick J. Wong
Reviewed-by: Brian Foster
---
 fs/xfs/scrub/agheader.c | 318 ++++++++++++++++++++++++------------------------
 fs/xfs/scrub/alloc.c | 56 ++++-----
 fs/xfs/scrub/attr.c | 106 ++++++++--------
 fs/xfs/scrub/bmap.c | 176 +++++++++++++--------------
 fs/xfs/scrub/btree.c | 166 ++++++++++++-------------
 fs/xfs/scrub/btree.h | 22 ++--
 fs/xfs/scrub/common.c | 172 +++++++++++++-------------
 fs/xfs/scrub/common.h | 102 ++++++++--------
 fs/xfs/scrub/dabtree.c | 124 +++++++++----------
 fs/xfs/scrub/dabtree.h | 14 +--
 fs/xfs/scrub/dir.c | 170 +++++++++++++-------------
 fs/xfs/scrub/ialloc.c | 138 ++++++++++-----------
 fs/xfs/scrub/inode.c | 160 ++++++++++++------------
 fs/xfs/scrub/parent.c | 62 +++++-----
 fs/xfs/scrub/quota.c | 64 +++++-----
 fs/xfs/scrub/refcount.c | 114 ++++++++---------
 fs/xfs/scrub/repair.c | 4 +-
 fs/xfs/scrub/rmap.c | 78 ++++++------
 fs/xfs/scrub/rtbitmap.c | 30 ++---
 fs/xfs/scrub/scrub.c | 142 ++++++++++-----------
 fs/xfs/scrub/scrub.h | 82 ++++++-------
 fs/xfs/scrub/symlink.c | 14 +--
 fs/xfs/scrub/trace.c | 2 +-
 fs/xfs/scrub/trace.h | 70 +++++------
 24 files changed, 1196 insertions(+), 1190 deletions(-)

diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 9bb0745f1ad2..c0625ec16d63 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -28,7 +28,7 @@ /* Cross-reference with the other btrees. */ STATIC void -xfs_scrub_superblock_xref( +xchk_superblock_xref( struct xfs_scrub_context *sc, struct xfs_buf *bp) { @@ -43,15 +43,15 @@ xfs_scrub_superblock_xref( agbno = XFS_SB_BLOCK(mp); - error = xfs_scrub_ag_init(sc, agno, &sc->sa); - if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error)) + error = xchk_ag_init(sc, agno, &sc->sa); + if (!xchk_xref_process_error(sc, agno, agbno, &error)) return; - xfs_scrub_xref_is_used_space(sc, agbno, 1); - xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_xref_is_used_space(sc, agbno, 1); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS); - xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo); - xfs_scrub_xref_is_not_shared(sc, agbno, 1); + xchk_xref_is_owned_by(sc, agbno, 1, &oinfo); + xchk_xref_is_not_shared(sc, agbno, 1); /* scrub teardown will take care of sc->sa for us */ } @@ -65,7 +65,7 @@ xfs_scrub_superblock_xref( * sb 0 is ok and we can use its information to check everything else. */ int -xfs_scrub_superblock( +xchk_superblock( struct xfs_scrub_context *sc) { struct xfs_mount *mp = sc->mp; @@ -98,7 +98,7 @@ xfs_scrub_superblock( default: break; } - if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error)) + if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error)) return error; sb = XFS_BUF_TO_SBP(bp); @@ -110,46 +110,46 @@ xfs_scrub_superblock( /* * Verify the geometries match. Fields that are permanently set by * mkfs are checked; fields that can be updated later are * checked.
*/ if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); /* Check sb_versionnum bits that are set at mkfs time. */ vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS | @@ -163,7 +163,7 @@ xfs_scrub_superblock( XFS_SB_VERSION_DIRV2BIT); if ((sb->sb_versionnum & vernum_mask) != (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); /* Check sb_versionnum bits that can be set after mkfs time. 
*/ vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT | @@ -171,40 +171,40 @@ xfs_scrub_superblock( XFS_SB_VERSION_QUOTABIT); if ((sb->sb_versionnum & vernum_mask) != (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname))) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); if (sb->sb_blocklog != mp->m_sb.sb_blocklog) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_sectlog != mp->m_sb.sb_sectlog) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_inodelog != mp->m_sb.sb_inodelog) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_inopblog != mp->m_sb.sb_inopblog) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_agblklog != mp->m_sb.sb_agblklog) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_rextslog != mp->m_sb.sb_rextslog) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); /* * Skip the summary counters since we track them in memory anyway. @@ -212,10 +212,10 @@ xfs_scrub_superblock( */ if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); /* * Skip the quota flags since repair will force quotacheck. @@ -223,46 +223,46 @@ xfs_scrub_superblock( */ if (sb->sb_flags != mp->m_sb.sb_flags) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); /* Do we see any invalid bits in sb_features2? 
*/ if (!xfs_sb_version_hasmorebits(&mp->m_sb)) { if (sb->sb_features2 != 0) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); } else { v2_ok = XFS_SB_VERSION2_OKBITS; if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5) v2_ok |= XFS_SB_VERSION2_CRCBIT; if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok))) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_features2 != sb->sb_bad_features2) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); } /* Check sb_features2 flags that are set at mkfs time. */ @@ -272,26 +272,26 @@ xfs_scrub_superblock( XFS_SB_VERSION2_FTYPE); if ((sb->sb_features2 & features_mask) != (cpu_to_be32(mp->m_sb.sb_features2) & features_mask)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); /* Check sb_features2 flags that can be set after mkfs time. */ features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT); if ((sb->sb_features2 & features_mask) != (cpu_to_be32(mp->m_sb.sb_features2) & features_mask)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (!xfs_sb_version_hascrc(&mp->m_sb)) { /* all v5 fields must be zero */ if (memchr_inv(&sb->sb_features_compat, 0, sizeof(struct xfs_dsb) - offsetof(struct xfs_dsb, sb_features_compat))) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); } else { /* Check compat flags; all are set at mkfs time. */ features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN); if ((sb->sb_features_compat & features_mask) != (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); /* Check ro compat flags; all are set at mkfs time. */ features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN | @@ -301,7 +301,7 @@ xfs_scrub_superblock( if ((sb->sb_features_ro_compat & features_mask) != (cpu_to_be32(mp->m_sb.sb_features_ro_compat) & features_mask)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); /* Check incompat flags; all are set at mkfs time. */ features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN | @@ -311,22 +311,22 @@ xfs_scrub_superblock( if ((sb->sb_features_incompat & features_mask) != (cpu_to_be32(mp->m_sb.sb_features_incompat) & features_mask)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); /* Check log incompat flags; all are set at mkfs time. */ features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN); if ((sb->sb_features_log_incompat & features_mask) != (cpu_to_be32(mp->m_sb.sb_features_log_incompat) & features_mask)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); /* Don't care about sb_crc */ if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino)) - xfs_scrub_block_set_preen(sc, bp); + xchk_block_set_preen(sc, bp); /* Don't care about sb_lsn */ } @@ -334,15 +334,15 @@ xfs_scrub_superblock( if (xfs_sb_version_hasmetauuid(&mp->m_sb)) { /* The metadata UUID must be the same for all supers */ if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid)) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); } /* Everything else must be zero. 
*/ if (memchr_inv(sb + 1, 0, BBTOB(bp->b_length) - sizeof(struct xfs_dsb))) - xfs_scrub_block_set_corrupt(sc, bp); + xchk_block_set_corrupt(sc, bp); - xfs_scrub_superblock_xref(sc, bp); + xchk_superblock_xref(sc, bp); return error; } @@ -351,7 +351,7 @@ xfs_scrub_superblock( /* Tally freespace record lengths. */ STATIC int -xfs_scrub_agf_record_bno_lengths( +xchk_agf_record_bno_lengths( struct xfs_btree_cur *cur, struct xfs_alloc_rec_incore *rec, void *priv) @@ -364,7 +364,7 @@ xfs_scrub_agf_record_bno_lengths( /* Check agf_freeblks */ static inline void -xfs_scrub_agf_xref_freeblks( +xchk_agf_xref_freeblks( struct xfs_scrub_context *sc) { struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); @@ -375,16 +375,16 @@ xfs_scrub_agf_xref_freeblks( return; error = xfs_alloc_query_all(sc->sa.bno_cur, - xfs_scrub_agf_record_bno_lengths, &blocks); - if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur)) + xchk_agf_record_bno_lengths, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur)) return; if (blocks != be32_to_cpu(agf->agf_freeblks)) - xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); } /* Cross reference the AGF with the cntbt (freespace by length btree) */ static inline void -xfs_scrub_agf_xref_cntbt( +xchk_agf_xref_cntbt( struct xfs_scrub_context *sc) { struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); @@ -398,25 +398,25 @@ xfs_scrub_agf_xref_cntbt( /* Any freespace at all? */ error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have); - if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur)) + if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur)) return; if (!have) { if (agf->agf_freeblks != be32_to_cpu(0)) - xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); return; } /* Check agf_longest */ error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have); - if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur)) + if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur)) return; if (!have || blocks != be32_to_cpu(agf->agf_longest)) - xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); } /* Check the btree block counts in the AGF against the btrees. 
*/ STATIC void -xfs_scrub_agf_xref_btreeblks( +xchk_agf_xref_btreeblks( struct xfs_scrub_context *sc) { struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); @@ -428,11 +428,11 @@ xfs_scrub_agf_xref_btreeblks( /* Check agf_rmap_blocks; set up for agf_btreeblks check */ if (sc->sa.rmap_cur) { error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks); - if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) return; btreeblks = blocks - 1; if (blocks != be32_to_cpu(agf->agf_rmap_blocks)) - xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); } else { btreeblks = 0; } @@ -447,22 +447,22 @@ xfs_scrub_agf_xref_btreeblks( /* Check agf_btreeblks */ error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks); - if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur)) + if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur)) return; btreeblks += blocks - 1; error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks); - if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur)) + if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur)) return; btreeblks += blocks - 1; if (btreeblks != be32_to_cpu(agf->agf_btreeblks)) - xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); } /* Check agf_refcount_blocks against tree size */ static inline void -xfs_scrub_agf_xref_refcblks( +xchk_agf_xref_refcblks( struct xfs_scrub_context *sc) { struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); @@ -473,15 +473,15 @@ xfs_scrub_agf_xref_refcblks( return; error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks); - if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur)) + if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) return; if (blocks != be32_to_cpu(agf->agf_refcount_blocks)) - xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); } /* Cross-reference with the other btrees. */ STATIC void -xfs_scrub_agf_xref( +xchk_agf_xref( struct xfs_scrub_context *sc) { struct xfs_owner_info oinfo; @@ -494,26 +494,26 @@ xfs_scrub_agf_xref( agbno = XFS_AGF_BLOCK(mp); - error = xfs_scrub_ag_btcur_init(sc, &sc->sa); + error = xchk_ag_btcur_init(sc, &sc->sa); if (error) return; - xfs_scrub_xref_is_used_space(sc, agbno, 1); - xfs_scrub_agf_xref_freeblks(sc); - xfs_scrub_agf_xref_cntbt(sc); - xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_xref_is_used_space(sc, agbno, 1); + xchk_agf_xref_freeblks(sc); + xchk_agf_xref_cntbt(sc); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS); - xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo); - xfs_scrub_agf_xref_btreeblks(sc); - xfs_scrub_xref_is_not_shared(sc, agbno, 1); - xfs_scrub_agf_xref_refcblks(sc); + xchk_xref_is_owned_by(sc, agbno, 1, &oinfo); + xchk_agf_xref_btreeblks(sc); + xchk_xref_is_not_shared(sc, agbno, 1); + xchk_agf_xref_refcblks(sc); /* scrub teardown will take care of sc->sa for us */ } /* Scrub the AGF. 
*/ int -xfs_scrub_agf( +xchk_agf( struct xfs_scrub_context *sc) { struct xfs_mount *mp = sc->mp; @@ -529,54 +529,54 @@ xfs_scrub_agf( int error = 0; agno = sc->sa.agno = sc->sm->sm_agno; - error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp, + error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp, &sc->sa.agf_bp, &sc->sa.agfl_bp); - if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error)) + if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error)) goto out; - xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp); + xchk_buffer_recheck(sc, sc->sa.agf_bp); agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); /* Check the AG length */ eoag = be32_to_cpu(agf->agf_length); if (eoag != xfs_ag_block_count(mp, agno)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); /* Check the AGF btree roots and levels */ agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]); if (!xfs_verify_agbno(mp, agno, agbno)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]); if (!xfs_verify_agbno(mp, agno, agbno)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]); if (level <= 0 || level > XFS_BTREE_MAXLEVELS) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]); if (level <= 0 || level > XFS_BTREE_MAXLEVELS) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); if (xfs_sb_version_hasrmapbt(&mp->m_sb)) { agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]); if (!xfs_verify_agbno(mp, agno, agbno)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]); if (level <= 0 || level > XFS_BTREE_MAXLEVELS) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); } if (xfs_sb_version_hasreflink(&mp->m_sb)) { agbno = be32_to_cpu(agf->agf_refcount_root); if (!xfs_verify_agbno(mp, agno, agbno)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); level = be32_to_cpu(agf->agf_refcount_level); if (level <= 0 || level > XFS_BTREE_MAXLEVELS) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); } /* Check the AGFL counters */ @@ -588,16 +588,16 @@ xfs_scrub_agf( else fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1; if (agfl_count != 0 && fl_count != agfl_count) - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); - xfs_scrub_agf_xref(sc); + xchk_agf_xref(sc); out: return error; } /* AGFL */ -struct xfs_scrub_agfl_info { +struct xchk_agfl_info { struct xfs_owner_info oinfo; unsigned int sz_entries; unsigned int nr_entries; @@ -607,7 +607,7 @@ struct xfs_scrub_agfl_info { /* Cross-reference with the other btrees. 
*/ STATIC void -xfs_scrub_agfl_block_xref( +xchk_agfl_block_xref( struct xfs_scrub_context *sc, xfs_agblock_t agbno, struct xfs_owner_info *oinfo) @@ -615,20 +615,20 @@ xfs_scrub_agfl_block_xref( if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return; - xfs_scrub_xref_is_used_space(sc, agbno, 1); - xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); - xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo); - xfs_scrub_xref_is_not_shared(sc, agbno, 1); + xchk_xref_is_used_space(sc, agbno, 1); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_xref_is_owned_by(sc, agbno, 1, oinfo); + xchk_xref_is_not_shared(sc, agbno, 1); } /* Scrub an AGFL block. */ STATIC int -xfs_scrub_agfl_block( +xchk_agfl_block( struct xfs_mount *mp, xfs_agblock_t agbno, void *priv) { - struct xfs_scrub_agfl_info *sai = priv; + struct xchk_agfl_info *sai = priv; struct xfs_scrub_context *sc = sai->sc; xfs_agnumber_t agno = sc->sa.agno; @@ -636,9 +636,9 @@ xfs_scrub_agfl_block( sai->nr_entries < sai->sz_entries) sai->entries[sai->nr_entries++] = agbno; else - xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp); + xchk_block_set_corrupt(sc, sc->sa.agfl_bp); - xfs_scrub_agfl_block_xref(sc, agbno, priv); + xchk_agfl_block_xref(sc, agbno, priv); if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return XFS_BTREE_QUERY_RANGE_ABORT; @@ -647,7 +647,7 @@ xfs_scrub_agfl_block( } static int -xfs_scrub_agblock_cmp( +xchk_agblock_cmp( const void *pa, const void *pb) { @@ -659,7 +659,7 @@ xfs_scrub_agblock_cmp( /* Cross-reference with the other btrees. */ STATIC void -xfs_scrub_agfl_xref( +xchk_agfl_xref( struct xfs_scrub_context *sc) { struct xfs_owner_info oinfo; @@ -672,15 +672,15 @@ xfs_scrub_agfl_xref( agbno = XFS_AGFL_BLOCK(mp); - error = xfs_scrub_ag_btcur_init(sc, &sc->sa); + error = xchk_ag_btcur_init(sc, &sc->sa); if (error) return; - xfs_scrub_xref_is_used_space(sc, agbno, 1); - xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_xref_is_used_space(sc, agbno, 1); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS); - xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo); - xfs_scrub_xref_is_not_shared(sc, agbno, 1); + xchk_xref_is_owned_by(sc, agbno, 1, &oinfo); + xchk_xref_is_not_shared(sc, agbno, 1); /* * Scrub teardown will take care of sc->sa for us. Leave sc->sa @@ -690,10 +690,10 @@ xfs_scrub_agfl_xref( /* Scrub the AGFL. */ int -xfs_scrub_agfl( +xchk_agfl( struct xfs_scrub_context *sc) { - struct xfs_scrub_agfl_info sai; + struct xchk_agfl_info sai; struct xfs_agf *agf; xfs_agnumber_t agno; unsigned int agflcount; @@ -701,15 +701,15 @@ xfs_scrub_agfl( int error; agno = sc->sa.agno = sc->sm->sm_agno; - error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp, + error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp, &sc->sa.agf_bp, &sc->sa.agfl_bp); - if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error)) + if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error)) goto out; if (!sc->sa.agf_bp) return -EFSCORRUPTED; - xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp); + xchk_buffer_recheck(sc, sc->sa.agfl_bp); - xfs_scrub_agfl_xref(sc); + xchk_agfl_xref(sc); if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) goto out; @@ -718,7 +718,7 @@ xfs_scrub_agfl( agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); agflcount = be32_to_cpu(agf->agf_flcount); if (agflcount > xfs_agfl_size(sc->mp)) { - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); goto out; } memset(&sai, 0, sizeof(sai)); @@ -734,7 +734,7 @@ xfs_scrub_agfl( /* Check the blocks in the AGFL. 
*/ xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG); error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp), - sc->sa.agfl_bp, xfs_scrub_agfl_block, &sai); + sc->sa.agfl_bp, xchk_agfl_block, &sai); if (error == XFS_BTREE_QUERY_RANGE_ABORT) { error = 0; goto out_free; @@ -743,16 +743,16 @@ xfs_scrub_agfl( goto out_free; if (agflcount != sai.nr_entries) { - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); goto out_free; } /* Sort entries, check for duplicates. */ sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]), - xfs_scrub_agblock_cmp, NULL); + xchk_agblock_cmp, NULL); for (i = 1; i < sai.nr_entries; i++) { if (sai.entries[i] == sai.entries[i - 1]) { - xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); + xchk_block_set_corrupt(sc, sc->sa.agf_bp); break; } } @@ -767,7 +767,7 @@ out: /* Check agi_count/agi_freecount */ static inline void -xfs_scrub_agi_xref_icounts( +xchk_agi_xref_icounts( struct xfs_scrub_context *sc) { struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp); @@ -779,16 +779,16 @@ xfs_scrub_agi_xref_icounts( return; error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount); - if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur)) + if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur)) return; if (be32_to_cpu(agi->agi_count) != icount || be32_to_cpu(agi->agi_freecount) != freecount) - xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp); } /* Cross-reference with the other btrees. */ STATIC void -xfs_scrub_agi_xref( +xchk_agi_xref( struct xfs_scrub_context *sc) { struct xfs_owner_info oinfo; @@ -801,23 +801,23 @@ xfs_scrub_agi_xref( agbno = XFS_AGI_BLOCK(mp); - error = xfs_scrub_ag_btcur_init(sc, &sc->sa); + error = xchk_ag_btcur_init(sc, &sc->sa); if (error) return; - xfs_scrub_xref_is_used_space(sc, agbno, 1); - xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); - xfs_scrub_agi_xref_icounts(sc); + xchk_xref_is_used_space(sc, agbno, 1); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_agi_xref_icounts(sc); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS); - xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo); - xfs_scrub_xref_is_not_shared(sc, agbno, 1); + xchk_xref_is_owned_by(sc, agbno, 1, &oinfo); + xchk_xref_is_not_shared(sc, agbno, 1); /* scrub teardown will take care of sc->sa for us */ } /* Scrub the AGI. 
*/ int -xfs_scrub_agi( +xchk_agi( struct xfs_scrub_context *sc) { struct xfs_mount *mp = sc->mp; @@ -834,36 +834,36 @@ xfs_scrub_agi( int error = 0; agno = sc->sa.agno = sc->sm->sm_agno; - error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp, + error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp, &sc->sa.agf_bp, &sc->sa.agfl_bp); - if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error)) + if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error)) goto out; - xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp); + xchk_buffer_recheck(sc, sc->sa.agi_bp); agi = XFS_BUF_TO_AGI(sc->sa.agi_bp); /* Check the AG length */ eoag = be32_to_cpu(agi->agi_length); if (eoag != xfs_ag_block_count(mp, agno)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); /* Check btree roots and levels */ agbno = be32_to_cpu(agi->agi_root); if (!xfs_verify_agbno(mp, agno, agbno)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); level = be32_to_cpu(agi->agi_level); if (level <= 0 || level > XFS_BTREE_MAXLEVELS) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); if (xfs_sb_version_hasfinobt(&mp->m_sb)) { agbno = be32_to_cpu(agi->agi_free_root); if (!xfs_verify_agbno(mp, agno, agbno)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); level = be32_to_cpu(agi->agi_free_level); if (level <= 0 || level > XFS_BTREE_MAXLEVELS) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); } /* Check inode counters */ @@ -871,16 +871,16 @@ xfs_scrub_agi( icount = be32_to_cpu(agi->agi_count); if (icount > last_agino - first_agino + 1 || icount < be32_to_cpu(agi->agi_freecount)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); /* Check inode pointers */ agino = be32_to_cpu(agi->agi_newino); if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); agino = be32_to_cpu(agi->agi_dirino); if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); /* Check unlinked inode buckets */ for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { @@ -888,13 +888,13 @@ xfs_scrub_agi( if (agino == NULLAGINO) continue; if (!xfs_verify_agino(mp, agno, agino)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); } if (agi->agi_pad32 != cpu_to_be32(0)) - xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); + xchk_block_set_corrupt(sc, sc->sa.agi_bp); - xfs_scrub_agi_xref(sc); + xchk_agi_xref(sc); out: return error; } diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c index 50e4f7fa06f0..1f6e3a6a1fdd 100644 --- a/fs/xfs/scrub/alloc.c +++ b/fs/xfs/scrub/alloc.c @@ -28,11 +28,11 @@ * Set us up to scrub free space btrees. */ int -xfs_scrub_setup_ag_allocbt( +xchk_setup_ag_allocbt( struct xfs_scrub_context *sc, struct xfs_inode *ip) { - return xfs_scrub_setup_ag_btree(sc, ip, false); + return xchk_setup_ag_btree(sc, ip, false); } /* Free space btree scrubber. */ @@ -41,7 +41,7 @@ xfs_scrub_setup_ag_allocbt( * bnobt/cntbt record, respectively. 
*/ STATIC void -xfs_scrub_allocbt_xref_other( +xchk_allocbt_xref_other( struct xfs_scrub_context *sc, xfs_agblock_t agbno, xfs_extlen_t len) @@ -56,32 +56,32 @@ xfs_scrub_allocbt_xref_other( pcur = &sc->sa.cnt_cur; else pcur = &sc->sa.bno_cur; - if (!*pcur || xfs_scrub_skip_xref(sc->sm)) + if (!*pcur || xchk_skip_xref(sc->sm)) return; error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec); - if (!xfs_scrub_should_check_xref(sc, &error, pcur)) + if (!xchk_should_check_xref(sc, &error, pcur)) return; if (!has_otherrec) { - xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0); + xchk_btree_xref_set_corrupt(sc, *pcur, 0); return; } error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec); - if (!xfs_scrub_should_check_xref(sc, &error, pcur)) + if (!xchk_should_check_xref(sc, &error, pcur)) return; if (!has_otherrec) { - xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0); + xchk_btree_xref_set_corrupt(sc, *pcur, 0); return; } if (fbno != agbno || flen != len) - xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0); + xchk_btree_xref_set_corrupt(sc, *pcur, 0); } /* Cross-reference with the other btrees. */ STATIC void -xfs_scrub_allocbt_xref( +xchk_allocbt_xref( struct xfs_scrub_context *sc, xfs_agblock_t agbno, xfs_extlen_t len) @@ -89,16 +89,16 @@ xfs_scrub_allocbt_xref( if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return; - xfs_scrub_allocbt_xref_other(sc, agbno, len); - xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len); - xfs_scrub_xref_has_no_owner(sc, agbno, len); - xfs_scrub_xref_is_not_shared(sc, agbno, len); + xchk_allocbt_xref_other(sc, agbno, len); + xchk_xref_is_not_inode_chunk(sc, agbno, len); + xchk_xref_has_no_owner(sc, agbno, len); + xchk_xref_is_not_shared(sc, agbno, len); } /* Scrub a bnobt/cntbt record. */ STATIC int -xfs_scrub_allocbt_rec( - struct xfs_scrub_btree *bs, +xchk_allocbt_rec( + struct xchk_btree *bs, union xfs_btree_rec *rec) { struct xfs_mount *mp = bs->cur->bc_mp; @@ -113,16 +113,16 @@ xfs_scrub_allocbt_rec( if (bno + len <= bno || !xfs_verify_agbno(mp, agno, bno) || !xfs_verify_agbno(mp, agno, bno + len - 1)) - xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); - xfs_scrub_allocbt_xref(bs->sc, bno, len); + xchk_allocbt_xref(bs->sc, bno, len); return error; } /* Scrub the freespace btrees for some AG. */ STATIC int -xfs_scrub_allocbt( +xchk_allocbt( struct xfs_scrub_context *sc, xfs_btnum_t which) { @@ -131,26 +131,26 @@ xfs_scrub_allocbt( xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG); cur = which == XFS_BTNUM_BNO ? 
sc->sa.bno_cur : sc->sa.cnt_cur; - return xfs_scrub_btree(sc, cur, xfs_scrub_allocbt_rec, &oinfo, NULL); + return xchk_btree(sc, cur, xchk_allocbt_rec, &oinfo, NULL); } int -xfs_scrub_bnobt( +xchk_bnobt( struct xfs_scrub_context *sc) { - return xfs_scrub_allocbt(sc, XFS_BTNUM_BNO); + return xchk_allocbt(sc, XFS_BTNUM_BNO); } int -xfs_scrub_cntbt( +xchk_cntbt( struct xfs_scrub_context *sc) { - return xfs_scrub_allocbt(sc, XFS_BTNUM_CNT); + return xchk_allocbt(sc, XFS_BTNUM_CNT); } /* xref check that the extent is not free */ void -xfs_scrub_xref_is_used_space( +xchk_xref_is_used_space( struct xfs_scrub_context *sc, xfs_agblock_t agbno, xfs_extlen_t len) @@ -158,12 +158,12 @@ xfs_scrub_xref_is_used_space( bool is_freesp; int error; - if (!sc->sa.bno_cur || xfs_scrub_skip_xref(sc->sm)) + if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm)) return; error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp); - if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur)) + if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur)) return; if (is_freesp) - xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0); + xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0); } diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c index de51cf8a8516..0068bebddf3e 100644 --- a/fs/xfs/scrub/attr.c +++ b/fs/xfs/scrub/attr.c @@ -32,7 +32,7 @@ /* Set us up to scrub an inode's extended attributes. */ int -xfs_scrub_setup_xattr( +xchk_setup_xattr( struct xfs_scrub_context *sc, struct xfs_inode *ip) { @@ -50,12 +50,12 @@ xfs_scrub_setup_xattr( if (!sc->buf) return -ENOMEM; - return xfs_scrub_setup_inode_contents(sc, ip, 0); + return xchk_setup_inode_contents(sc, ip, 0); } /* Extended Attributes */ -struct xfs_scrub_xattr { +struct xchk_xattr { struct xfs_attr_list_context context; struct xfs_scrub_context *sc; }; @@ -69,22 +69,22 @@ struct xfs_scrub_xattr { * or if we get more or less data than we expected. */ static void -xfs_scrub_xattr_listent( +xchk_xattr_listent( struct xfs_attr_list_context *context, int flags, unsigned char *name, int namelen, int valuelen) { - struct xfs_scrub_xattr *sx; + struct xchk_xattr *sx; struct xfs_da_args args = { NULL }; int error = 0; - sx = container_of(context, struct xfs_scrub_xattr, context); + sx = container_of(context, struct xchk_xattr, context); if (flags & XFS_ATTR_INCOMPLETE) { /* Incomplete attr key, just mark the inode for preening. */ - xfs_scrub_ino_set_preen(sx->sc, context->dp->i_ino); + xchk_ino_set_preen(sx->sc, context->dp->i_ino); return; } @@ -106,11 +106,11 @@ xfs_scrub_xattr_listent( error = xfs_attr_get_ilocked(context->dp, &args); if (error == -EEXIST) error = 0; - if (!xfs_scrub_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno, + if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno, &error)) goto fail_xref; if (args.valuelen != valuelen) - xfs_scrub_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, + xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno); fail_xref: if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) @@ -126,7 +126,7 @@ fail_xref: * the smallest address */ STATIC bool -xfs_scrub_xattr_set_map( +xchk_xattr_set_map( struct xfs_scrub_context *sc, unsigned long *map, unsigned int start, @@ -154,7 +154,7 @@ xfs_scrub_xattr_set_map( * attr freemap has problems or points to used space. 
*/ STATIC bool -xfs_scrub_xattr_check_freemap( +xchk_xattr_check_freemap( struct xfs_scrub_context *sc, unsigned long *map, struct xfs_attr3_icleaf_hdr *leafhdr) @@ -168,7 +168,7 @@ xfs_scrub_xattr_check_freemap( freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize); bitmap_zero(freemap, mapsize); for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { - if (!xfs_scrub_xattr_set_map(sc, freemap, + if (!xchk_xattr_set_map(sc, freemap, leafhdr->freemap[i].base, leafhdr->freemap[i].size)) return false; @@ -184,8 +184,8 @@ xfs_scrub_xattr_check_freemap( * Returns the number of bytes used for the name/value data. */ STATIC void -xfs_scrub_xattr_entry( - struct xfs_scrub_da_btree *ds, +xchk_xattr_entry( + struct xchk_da_btree *ds, int level, char *buf_end, struct xfs_attr_leafblock *leaf, @@ -204,17 +204,17 @@ xfs_scrub_xattr_entry( unsigned int namesize; if (ent->pad2 != 0) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); /* Hash values in order? */ if (be32_to_cpu(ent->hashval) < *last_hashval) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); *last_hashval = be32_to_cpu(ent->hashval); nameidx = be16_to_cpu(ent->nameidx); if (nameidx < leafhdr->firstused || nameidx >= mp->m_attr_geo->blksize) { - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); return; } @@ -225,27 +225,27 @@ xfs_scrub_xattr_entry( be16_to_cpu(lentry->valuelen)); name_end = (char *)lentry + namesize; if (lentry->namelen == 0) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); } else { rentry = xfs_attr3_leaf_name_remote(leaf, idx); namesize = xfs_attr_leaf_entsize_remote(rentry->namelen); name_end = (char *)rentry + namesize; if (rentry->namelen == 0 || rentry->valueblk == 0) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); } if (name_end > buf_end) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); - if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, nameidx, namesize)) - xfs_scrub_da_set_corrupt(ds, level); + if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize)) + xchk_da_set_corrupt(ds, level); if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) *usedbytes += namesize; } /* Scrub an attribute leaf. 
*/ STATIC int -xfs_scrub_xattr_block( - struct xfs_scrub_da_btree *ds, +xchk_xattr_block( + struct xchk_da_btree *ds, int level) { struct xfs_attr3_icleaf_hdr leafhdr; @@ -275,10 +275,10 @@ xfs_scrub_xattr_block( if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 || leaf->hdr.info.hdr.pad != 0) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); } else { if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); } /* Check the leaf header */ @@ -286,44 +286,44 @@ xfs_scrub_xattr_block( hdrsize = xfs_attr3_leaf_hdr_size(leaf); if (leafhdr.usedbytes > mp->m_attr_geo->blksize) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); if (leafhdr.firstused > mp->m_attr_geo->blksize) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); if (leafhdr.firstused < hdrsize) - xfs_scrub_da_set_corrupt(ds, level); - if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, 0, hdrsize)) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); + if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize)) + xchk_da_set_corrupt(ds, level); if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) goto out; entries = xfs_attr3_leaf_entryp(leaf); if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize; for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) { /* Mark the leaf entry itself. */ off = (char *)ent - (char *)leaf; - if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, off, + if (!xchk_xattr_set_map(ds->sc, usedmap, off, sizeof(xfs_attr_leaf_entry_t))) { - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); goto out; } /* Check the entry and nameval. */ - xfs_scrub_xattr_entry(ds, level, buf_end, leaf, &leafhdr, + xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr, usedmap, ent, i, &usedbytes, &last_hashval); if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) goto out; } - if (!xfs_scrub_xattr_check_freemap(ds->sc, usedmap, &leafhdr)) - xfs_scrub_da_set_corrupt(ds, level); + if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr)) + xchk_da_set_corrupt(ds, level); if (leafhdr.usedbytes != usedbytes) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); out: return 0; @@ -331,8 +331,8 @@ out: /* Scrub a attribute btree record. */ STATIC int -xfs_scrub_xattr_rec( - struct xfs_scrub_da_btree *ds, +xchk_xattr_rec( + struct xchk_da_btree *ds, int level, void *rec) { @@ -352,14 +352,14 @@ xfs_scrub_xattr_rec( blk = &ds->state->path.blk[level]; /* Check the whole block, if necessary. */ - error = xfs_scrub_xattr_block(ds, level); + error = xchk_xattr_block(ds, level); if (error) goto out; if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) goto out; /* Check the hash of the entry. 
*/ - error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval); + error = xchk_da_btree_hash(ds, level, &ent->hashval); if (error) goto out; @@ -368,7 +368,7 @@ xfs_scrub_xattr_rec( hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr); nameidx = be16_to_cpu(ent->nameidx); if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) { - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); goto out; } @@ -377,12 +377,12 @@ xfs_scrub_xattr_rec( badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE | XFS_ATTR_INCOMPLETE); if ((ent->flags & badflags) != 0) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); if (ent->flags & XFS_ATTR_LOCAL) { lentry = (struct xfs_attr_leaf_name_local *) (((char *)bp->b_addr) + nameidx); if (lentry->namelen <= 0) { - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); goto out; } calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen); @@ -390,13 +390,13 @@ xfs_scrub_xattr_rec( rentry = (struct xfs_attr_leaf_name_remote *) (((char *)bp->b_addr) + nameidx); if (rentry->namelen <= 0) { - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); goto out; } calc_hash = xfs_da_hashname(rentry->name, rentry->namelen); } if (calc_hash != hash) - xfs_scrub_da_set_corrupt(ds, level); + xchk_da_set_corrupt(ds, level); out: return error; @@ -404,10 +404,10 @@ out: /* Scrub the extended attribute metadata. */ int -xfs_scrub_xattr( +xchk_xattr( struct xfs_scrub_context *sc) { - struct xfs_scrub_xattr sx; + struct xchk_xattr sx; struct attrlist_cursor_kern cursor = { 0 }; xfs_dablk_t last_checked = -1U; int error = 0; @@ -417,7 +417,7 @@ xfs_scrub_xattr( memset(&sx, 0, sizeof(sx)); /* Check attribute tree structure */ - error = xfs_scrub_da_btree(sc, XFS_ATTR_FORK, xfs_scrub_xattr_rec, + error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec, &last_checked); if (error) goto out; @@ -429,7 +429,7 @@ xfs_scrub_xattr( sx.context.dp = sc->ip; sx.context.cursor = &cursor; sx.context.resynch = 1; - sx.context.put_listent = xfs_scrub_xattr_listent; + sx.context.put_listent = xchk_xattr_listent; sx.context.tp = sc->tp; sx.context.flags = ATTR_INCOMPLETE; sx.sc = sc; @@ -438,7 +438,7 @@ xfs_scrub_xattr( * Look up every xattr in this file by name. * * Use the backend implementation of xfs_attr_list to call - * xfs_scrub_xattr_listent on every attribute key in this inode. + * xchk_xattr_listent on every attribute key in this inode. * In other words, we use the same iterator/callback mechanism * that listattr uses to scrub extended attributes, though in our * _listent function, we check the value of the attribute. @@ -451,7 +451,7 @@ xfs_scrub_xattr( * locking order. */ error = xfs_attr_list_int_ilocked(&sx.context); - if (!xfs_scrub_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error)) + if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error)) goto out; out: return error; diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index ebbfab173e97..19cfbd3910a2 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -33,13 +33,13 @@ /* Set us up with an inode's bmap. */ int -xfs_scrub_setup_inode_bmap( +xchk_setup_inode_bmap( struct xfs_scrub_context *sc, struct xfs_inode *ip) { int error; - error = xfs_scrub_get_inode(sc, ip); + error = xchk_get_inode(sc, ip); if (error) goto out; @@ -60,7 +60,7 @@ xfs_scrub_setup_inode_bmap( } /* Got the inode, lock it and we're ready to go. 
*/ - error = xfs_scrub_trans_alloc(sc, 0); + error = xchk_trans_alloc(sc, 0); if (error) goto out; sc->ilock_flags |= XFS_ILOCK_EXCL; @@ -78,7 +78,7 @@ out: * is in btree format. */ -struct xfs_scrub_bmap_info { +struct xchk_bmap_info { struct xfs_scrub_context *sc; xfs_fileoff_t lastoff; bool is_rt; @@ -88,8 +88,8 @@ struct xfs_scrub_bmap_info { /* Look for a corresponding rmap for this irec. */ static inline bool -xfs_scrub_bmap_get_rmap( - struct xfs_scrub_bmap_info *info, +xchk_bmap_get_rmap( + struct xchk_bmap_info *info, struct xfs_bmbt_irec *irec, xfs_agblock_t agbno, uint64_t owner, @@ -120,7 +120,7 @@ xfs_scrub_bmap_get_rmap( if (info->is_shared) { error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno, owner, offset, rflags, rmap, &has_rmap); - if (!xfs_scrub_should_check_xref(info->sc, &error, + if (!xchk_should_check_xref(info->sc, &error, &info->sc->sa.rmap_cur)) return false; goto out; @@ -131,28 +131,28 @@ xfs_scrub_bmap_get_rmap( */ error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner, offset, rflags, &has_rmap); - if (!xfs_scrub_should_check_xref(info->sc, &error, + if (!xchk_should_check_xref(info->sc, &error, &info->sc->sa.rmap_cur)) return false; if (!has_rmap) goto out; error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap); - if (!xfs_scrub_should_check_xref(info->sc, &error, + if (!xchk_should_check_xref(info->sc, &error, &info->sc->sa.rmap_cur)) return false; out: if (!has_rmap) - xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); return has_rmap; } /* Make sure that we have rmapbt records for this extent. */ STATIC void -xfs_scrub_bmap_xref_rmap( - struct xfs_scrub_bmap_info *info, +xchk_bmap_xref_rmap( + struct xchk_bmap_info *info, struct xfs_bmbt_irec *irec, xfs_agblock_t agbno) { @@ -160,7 +160,7 @@ xfs_scrub_bmap_xref_rmap( unsigned long long rmap_end; uint64_t owner; - if (!info->sc->sa.rmap_cur || xfs_scrub_skip_xref(info->sc->sm)) + if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm)) return; if (info->whichfork == XFS_COW_FORK) @@ -169,14 +169,14 @@ xfs_scrub_bmap_xref_rmap( owner = info->sc->ip->i_ino; /* Find the rmap record for this irec. */ - if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap)) + if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap)) return; /* Check the rmap. 
*/ rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount; if (rmap.rm_startblock > agbno || agbno + irec->br_blockcount > rmap_end) - xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); /* @@ -189,12 +189,12 @@ xfs_scrub_bmap_xref_rmap( rmap.rm_blockcount; if (rmap.rm_offset > irec->br_startoff || irec->br_startoff + irec->br_blockcount > rmap_end) - xfs_scrub_fblock_xref_set_corrupt(info->sc, + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); } if (rmap.rm_owner != owner) - xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); /* @@ -207,22 +207,22 @@ xfs_scrub_bmap_xref_rmap( if (owner != XFS_RMAP_OWN_COW && irec->br_state == XFS_EXT_UNWRITTEN && !(rmap.rm_flags & XFS_RMAP_UNWRITTEN)) - xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); if (info->whichfork == XFS_ATTR_FORK && !(rmap.rm_flags & XFS_RMAP_ATTR_FORK)) - xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK) - xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); } /* Cross-reference a single rtdev extent record. */ STATIC void -xfs_scrub_bmap_rt_extent_xref( - struct xfs_scrub_bmap_info *info, +xchk_bmap_rt_extent_xref( + struct xchk_bmap_info *info, struct xfs_inode *ip, struct xfs_btree_cur *cur, struct xfs_bmbt_irec *irec) @@ -230,14 +230,14 @@ xfs_scrub_bmap_rt_extent_xref( if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return; - xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock, + xchk_xref_is_used_rt_space(info->sc, irec->br_startblock, irec->br_blockcount); } /* Cross-reference a single datadev extent record. */ STATIC void -xfs_scrub_bmap_extent_xref( - struct xfs_scrub_bmap_info *info, +xchk_bmap_extent_xref( + struct xchk_bmap_info *info, struct xfs_inode *ip, struct xfs_btree_cur *cur, struct xfs_bmbt_irec *irec) @@ -255,38 +255,38 @@ xfs_scrub_bmap_extent_xref( agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock); len = irec->br_blockcount; - error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa); - if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork, + error = xchk_ag_init(info->sc, agno, &info->sc->sa); + if (!xchk_fblock_process_error(info->sc, info->whichfork, irec->br_startoff, &error)) return; - xfs_scrub_xref_is_used_space(info->sc, agbno, len); - xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len); - xfs_scrub_bmap_xref_rmap(info, irec, agbno); + xchk_xref_is_used_space(info->sc, agbno, len); + xchk_xref_is_not_inode_chunk(info->sc, agbno, len); + xchk_bmap_xref_rmap(info, irec, agbno); switch (info->whichfork) { case XFS_DATA_FORK: if (xfs_is_reflink_inode(info->sc->ip)) break; /* fall through */ case XFS_ATTR_FORK: - xfs_scrub_xref_is_not_shared(info->sc, agbno, + xchk_xref_is_not_shared(info->sc, agbno, irec->br_blockcount); break; case XFS_COW_FORK: - xfs_scrub_xref_is_cow_staging(info->sc, agbno, + xchk_xref_is_cow_staging(info->sc, agbno, irec->br_blockcount); break; } - xfs_scrub_ag_free(info->sc, &info->sc->sa); + xchk_ag_free(info->sc, &info->sc->sa); } /* Scrub a single extent record. 
*/ STATIC int -xfs_scrub_bmap_extent( +xchk_bmap_extent( struct xfs_inode *ip, struct xfs_btree_cur *cur, - struct xfs_scrub_bmap_info *info, + struct xchk_bmap_info *info, struct xfs_bmbt_irec *irec) { struct xfs_mount *mp = info->sc->mp; @@ -302,12 +302,12 @@ xfs_scrub_bmap_extent( * from the incore list, for which there is no ordering check. */ if (irec->br_startoff < info->lastoff) - xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, + xchk_fblock_set_corrupt(info->sc, info->whichfork, irec->br_startoff); /* There should never be a "hole" extent in either extent list. */ if (irec->br_startblock == HOLESTARTBLOCK) - xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, + xchk_fblock_set_corrupt(info->sc, info->whichfork, irec->br_startoff); /* @@ -315,40 +315,40 @@ xfs_scrub_bmap_extent( * in-core extent scan, and we should never see these in the bmbt. */ if (isnullstartblock(irec->br_startblock)) - xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, + xchk_fblock_set_corrupt(info->sc, info->whichfork, irec->br_startoff); /* Make sure the extent points to a valid place. */ if (irec->br_blockcount > MAXEXTLEN) - xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, + xchk_fblock_set_corrupt(info->sc, info->whichfork, irec->br_startoff); if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock) - xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, + xchk_fblock_set_corrupt(info->sc, info->whichfork, irec->br_startoff); end = irec->br_startblock + irec->br_blockcount - 1; if (info->is_rt && (!xfs_verify_rtbno(mp, irec->br_startblock) || !xfs_verify_rtbno(mp, end))) - xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, + xchk_fblock_set_corrupt(info->sc, info->whichfork, irec->br_startoff); if (!info->is_rt && (!xfs_verify_fsbno(mp, irec->br_startblock) || !xfs_verify_fsbno(mp, end) || XFS_FSB_TO_AGNO(mp, irec->br_startblock) != XFS_FSB_TO_AGNO(mp, end))) - xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, + xchk_fblock_set_corrupt(info->sc, info->whichfork, irec->br_startoff); /* We don't allow unwritten extents on attr forks. */ if (irec->br_state == XFS_EXT_UNWRITTEN && info->whichfork == XFS_ATTR_FORK) - xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, + xchk_fblock_set_corrupt(info->sc, info->whichfork, irec->br_startoff); if (info->is_rt) - xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec); + xchk_bmap_rt_extent_xref(info, ip, cur, irec); else - xfs_scrub_bmap_extent_xref(info, ip, cur, irec); + xchk_bmap_extent_xref(info, ip, cur, irec); info->lastoff = irec->br_startoff + irec->br_blockcount; return error; @@ -356,12 +356,12 @@ xfs_scrub_bmap_extent( /* Scrub a bmbt record. */ STATIC int -xfs_scrub_bmapbt_rec( - struct xfs_scrub_btree *bs, +xchk_bmapbt_rec( + struct xchk_btree *bs, union xfs_btree_rec *rec) { struct xfs_bmbt_irec irec; - struct xfs_scrub_bmap_info *info = bs->private; + struct xchk_bmap_info *info = bs->private; struct xfs_inode *ip = bs->cur->bc_private.b.ip; struct xfs_buf *bp = NULL; struct xfs_btree_block *block; @@ -378,22 +378,22 @@ xfs_scrub_bmapbt_rec( block = xfs_btree_get_block(bs->cur, i, &bp); owner = be64_to_cpu(block->bb_u.l.bb_owner); if (owner != ip->i_ino) - xfs_scrub_fblock_set_corrupt(bs->sc, + xchk_fblock_set_corrupt(bs->sc, info->whichfork, 0); } } /* Set up the in-core record and scrub it. */ xfs_bmbt_disk_get_all(&rec->bmbt, &irec); - return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec); + return xchk_bmap_extent(ip, bs->cur, info, &irec); } /* Scan the btree records. 
*/ STATIC int -xfs_scrub_bmap_btree( +xchk_bmap_btree( struct xfs_scrub_context *sc, int whichfork, - struct xfs_scrub_bmap_info *info) + struct xchk_bmap_info *info) { struct xfs_owner_info oinfo; struct xfs_mount *mp = sc->mp; @@ -403,12 +403,12 @@ xfs_scrub_bmap_btree( cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork); xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); - error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info); + error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info); xfs_btree_del_cursor(cur, error); return error; } -struct xfs_scrub_bmap_check_rmap_info { +struct xchk_bmap_check_rmap_info { struct xfs_scrub_context *sc; int whichfork; struct xfs_iext_cursor icur; @@ -416,13 +416,13 @@ struct xfs_scrub_bmap_check_rmap_info { /* Can we find bmaps that fit this rmap? */ STATIC int -xfs_scrub_bmap_check_rmap( +xchk_bmap_check_rmap( struct xfs_btree_cur *cur, struct xfs_rmap_irec *rec, void *priv) { struct xfs_bmbt_irec irec; - struct xfs_scrub_bmap_check_rmap_info *sbcri = priv; + struct xchk_bmap_check_rmap_info *sbcri = priv; struct xfs_ifork *ifp; struct xfs_scrub_context *sc = sbcri->sc; bool have_map; @@ -439,14 +439,14 @@ xfs_scrub_bmap_check_rmap( /* Now look up the bmbt record. */ ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork); if (!ifp) { - xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, + xchk_fblock_set_corrupt(sc, sbcri->whichfork, rec->rm_offset); goto out; } have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset, &sbcri->icur, &irec); if (!have_map) - xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, + xchk_fblock_set_corrupt(sc, sbcri->whichfork, rec->rm_offset); /* * bmap extent record lengths are constrained to 2^21 blocks in length @@ -457,14 +457,14 @@ xfs_scrub_bmap_check_rmap( */ while (have_map) { if (irec.br_startoff != rec->rm_offset) - xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, + xchk_fblock_set_corrupt(sc, sbcri->whichfork, rec->rm_offset); if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp, cur->bc_private.a.agno, rec->rm_startblock)) - xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, + xchk_fblock_set_corrupt(sc, sbcri->whichfork, rec->rm_offset); if (irec.br_blockcount > rec->rm_blockcount) - xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, + xchk_fblock_set_corrupt(sc, sbcri->whichfork, rec->rm_offset); if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) break; @@ -475,7 +475,7 @@ xfs_scrub_bmap_check_rmap( break; have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec); if (!have_map) - xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, + xchk_fblock_set_corrupt(sc, sbcri->whichfork, rec->rm_offset); } @@ -487,12 +487,12 @@ out: /* Make sure each rmap has a corresponding bmbt entry. */ STATIC int -xfs_scrub_bmap_check_ag_rmaps( +xchk_bmap_check_ag_rmaps( struct xfs_scrub_context *sc, int whichfork, xfs_agnumber_t agno) { - struct xfs_scrub_bmap_check_rmap_info sbcri; + struct xchk_bmap_check_rmap_info sbcri; struct xfs_btree_cur *cur; struct xfs_buf *agf; int error; @@ -509,7 +509,7 @@ xfs_scrub_bmap_check_ag_rmaps( sbcri.sc = sc; sbcri.whichfork = whichfork; - error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri); + error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri); if (error == XFS_BTREE_QUERY_RANGE_ABORT) error = 0; @@ -521,7 +521,7 @@ out_agf: /* Make sure each rmap has a corresponding bmbt entry. 
*/ STATIC int -xfs_scrub_bmap_check_rmaps( +xchk_bmap_check_rmaps( struct xfs_scrub_context *sc, int whichfork) { @@ -561,7 +561,7 @@ xfs_scrub_bmap_check_rmaps( return 0; for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) { - error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno); + error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno); if (error) return error; if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) @@ -578,12 +578,12 @@ xfs_scrub_bmap_check_rmaps( * Then we unconditionally scan the incore extent cache. */ STATIC int -xfs_scrub_bmap( +xchk_bmap( struct xfs_scrub_context *sc, int whichfork) { struct xfs_bmbt_irec irec; - struct xfs_scrub_bmap_info info = { NULL }; + struct xchk_bmap_info info = { NULL }; struct xfs_mount *mp = sc->mp; struct xfs_inode *ip = sc->ip; struct xfs_ifork *ifp; @@ -605,7 +605,7 @@ xfs_scrub_bmap( goto out; /* No CoW forks on non-reflink inodes/filesystems. */ if (!xfs_is_reflink_inode(ip)) { - xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); + xchk_ino_set_corrupt(sc, sc->ip->i_ino); goto out; } break; @@ -614,7 +614,7 @@ xfs_scrub_bmap( goto out_check_rmap; if (!xfs_sb_version_hasattr(&mp->m_sb) && !xfs_sb_version_hasattr2(&mp->m_sb)) - xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); + xchk_ino_set_corrupt(sc, sc->ip->i_ino); break; default: ASSERT(whichfork == XFS_DATA_FORK); @@ -630,22 +630,22 @@ xfs_scrub_bmap( goto out; case XFS_DINODE_FMT_EXTENTS: if (!(ifp->if_flags & XFS_IFEXTENTS)) { - xfs_scrub_fblock_set_corrupt(sc, whichfork, 0); + xchk_fblock_set_corrupt(sc, whichfork, 0); goto out; } break; case XFS_DINODE_FMT_BTREE: if (whichfork == XFS_COW_FORK) { - xfs_scrub_fblock_set_corrupt(sc, whichfork, 0); + xchk_fblock_set_corrupt(sc, whichfork, 0); goto out; } - error = xfs_scrub_bmap_btree(sc, whichfork, &info); + error = xchk_bmap_btree(sc, whichfork, &info); if (error) goto out; break; default: - xfs_scrub_fblock_set_corrupt(sc, whichfork, 0); + xchk_fblock_set_corrupt(sc, whichfork, 0); goto out; } @@ -655,37 +655,37 @@ xfs_scrub_bmap( /* Now try to scrub the in-memory extent list. */ if (!(ifp->if_flags & XFS_IFEXTENTS)) { error = xfs_iread_extents(sc->tp, ip, whichfork); - if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error)) + if (!xchk_fblock_process_error(sc, whichfork, 0, &error)) goto out; } /* Find the offset of the last extent in the mapping. */ error = xfs_bmap_last_offset(ip, &endoff, whichfork); - if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error)) + if (!xchk_fblock_process_error(sc, whichfork, 0, &error)) goto out; /* Scrub extent records. */ info.lastoff = 0; ifp = XFS_IFORK_PTR(ip, whichfork); for_each_xfs_iext(ifp, &icur, &irec) { - if (xfs_scrub_should_terminate(sc, &error) || + if (xchk_should_terminate(sc, &error) || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) break; if (isnullstartblock(irec.br_startblock)) continue; if (irec.br_startoff >= endoff) { - xfs_scrub_fblock_set_corrupt(sc, whichfork, + xchk_fblock_set_corrupt(sc, whichfork, irec.br_startoff); goto out; } - error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec); + error = xchk_bmap_extent(ip, NULL, &info, &irec); if (error) goto out; } out_check_rmap: - error = xfs_scrub_bmap_check_rmaps(sc, whichfork); - if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error)) + error = xchk_bmap_check_rmaps(sc, whichfork); + if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error)) goto out; out: return error; @@ -693,27 +693,27 @@ out: /* Scrub an inode's data fork. 
*/ int -xfs_scrub_bmap_data( +xchk_bmap_data( struct xfs_scrub_context *sc) { - return xfs_scrub_bmap(sc, XFS_DATA_FORK); + return xchk_bmap(sc, XFS_DATA_FORK); } /* Scrub an inode's attr fork. */ int -xfs_scrub_bmap_attr( +xchk_bmap_attr( struct xfs_scrub_context *sc) { - return xfs_scrub_bmap(sc, XFS_ATTR_FORK); + return xchk_bmap(sc, XFS_ATTR_FORK); } /* Scrub an inode's CoW fork. */ int -xfs_scrub_bmap_cow( +xchk_bmap_cow( struct xfs_scrub_context *sc) { if (!xfs_is_reflink_inode(sc->ip)) return -ENOENT; - return xfs_scrub_bmap(sc, XFS_COW_FORK); + return xchk_bmap(sc, XFS_COW_FORK); }

diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 5b472045f036..30fe9a147959 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -29,7 +29,7 @@ * operational errors in common.c. */ static bool -__xfs_scrub_btree_process_error( +__xchk_btree_process_error( struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, int level, @@ -43,7 +43,7 @@ __xfs_scrub_btree_process_error( switch (*error) { case -EDEADLOCK: /* Used to restart an op with deadlock avoidance. */ - trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error); + trace_xchk_deadlock_retry(sc->ip, sc->sm, *error); break; case -EFSBADCRC: case -EFSCORRUPTED: @@ -53,10 +53,10 @@
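
The rename is purely mechanical, and the commit message pins down its scope: every metadata-checking xfs_scrub_ symbol becomes xchk_, while the symbols shared by scrub and repair keep their old names (struct xfs_scrub_context, for instance, is untouched in every hunk above). The patch does not record how it was generated; the one-liner below is only a sketch of a substitution with that shape, and its exclusion list is an assumption inferred from the hunks rather than taken from the commit.

    # Sketch only -- not the script used to produce this patch.
    # Rename the checker prefix, but skip the symbols that scrub and
    # repair share (this exclusion list is illustrative, not
    # exhaustive).
    perl -pi -e 's/xfs_scrub_(?!context\b|metadata\b)/xchk_/g' fs/xfs/scrub/*.[ch]

A bare s/xfs_scrub_/xchk_/g would also rename the shared symbols, which is exactly what the commit message says this patch avoids; hence the negative lookahead.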