mapping_set_folio_min_order(VFS_I(ip)->i_mapping,
M_IGEO(mp)->min_folio_order);
- XFS_STATS_INC(mp, vn_active);
+ XFS_STATS_INC(mp, xs_inodes_active);
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(ip->i_ino == 0);
/* asserts to verify all state is correct here */
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
- XFS_STATS_DEC(ip->i_mount, vn_active);
+ XFS_STATS_DEC(ip->i_mount, xs_inodes_active);
call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
struct xfs_mount *mp = ip->i_mount;
bool need_inactive;
- XFS_STATS_INC(mp, vn_reclaim);
+ XFS_STATS_INC(mp, xs_inode_mark_reclaimable);
/*
* We should never get here with any of the reclaim flags already set.
{ "xstrat", xfsstats_offset(xs_write_calls) },
{ "rw", xfsstats_offset(xs_attr_get) },
{ "attr", xfsstats_offset(xs_iflush_count)},
- { "icluster", xfsstats_offset(vn_active) },
+ { "icluster", xfsstats_offset(xs_inodes_active) },
{ "vnodes", xfsstats_offset(xb_get) },
{ "buf", xfsstats_offset(xs_abtb_2) },
{ "abtb2", xfsstats_offset(xs_abtc_2) },
/*
 * Zero every per-CPU xfsstats bucket, preserving only the active-inode
 * count.  That counter is a live gauge (inodes currently held), not an
 * event counter, so wiping it would corrupt the running total — hence
 * the save/restore dance around the memset below.
 */
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
int c;
- uint32_t vn_active;
+ uint32_t xs_inodes_active;
xfs_notice(NULL, "Clearing xfsstats");
for_each_possible_cpu(c) {
/*
 * Disable preemption so the snapshot, memset and restore all hit
 * the same CPU's bucket without this task migrating mid-update.
 */
preempt_disable();
- /* save vn_active, it's a universal truth! */
- vn_active = per_cpu_ptr(stats, c)->s.vn_active;
+ /* save xs_inodes_active, it's a universal truth! */
+ xs_inodes_active = per_cpu_ptr(stats, c)->s.xs_inodes_active;
/* Clear the whole bucket, then put the saved gauge back. */
memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
- per_cpu_ptr(stats, c)->s.vn_active = vn_active;
+ per_cpu_ptr(stats, c)->s.xs_inodes_active = xs_inodes_active;
preempt_enable();
}
}
uint32_t xs_iflush_count;
uint32_t xs_icluster_flushcnt;
uint32_t xs_icluster_flushinode;
- uint32_t vn_active; /* # vnodes not on free lists */
- uint32_t vn_alloc; /* # times vn_alloc called */
- uint32_t vn_get; /* # times vn_get called */
- uint32_t vn_hold; /* # times vn_hold called */
- uint32_t vn_rele; /* # times vn_rele called */
- uint32_t vn_reclaim; /* # times vn_reclaim called */
- uint32_t vn_remove; /* # times vn_remove called */
- uint32_t vn_free; /* # times vn_free called */
+ uint32_t xs_inodes_active;
+ uint32_t __unused_vn_alloc;
+ uint32_t __unused_vn_get;
+ uint32_t __unused_vn_hold;
+ uint32_t xs_inode_destroy;
+ uint32_t xs_inode_destroy2; /* same as xs_inode_destroy */
+ uint32_t xs_inode_mark_reclaimable;
+ uint32_t __unused_vn_free;
uint32_t xb_get;
uint32_t xb_create;
uint32_t xb_get_locked;
trace_xfs_destroy_inode(ip);
ASSERT(!rwsem_is_locked(&inode->i_rwsem));
- XFS_STATS_INC(ip->i_mount, vn_rele);
- XFS_STATS_INC(ip->i_mount, vn_remove);
+ XFS_STATS_INC(ip->i_mount, xs_inode_destroy);
+ XFS_STATS_INC(ip->i_mount, xs_inode_destroy2);
xfs_inode_mark_reclaimable(ip);
}