xfs: generalize the freespace and reserved blocks handling
xfs_{add,dec}_freecounter already handles the block and RT extent percpu
counters, but it currently hardcodes the passed in counter. Add a
freecounter abstraction that uses an enum to designate the counter and
add wrappers that hide the actual percpu_counters. This will allow
expanding the reserved block handling to the RT extent counter in the
next step, and also prepares for adding yet another such counter that
can share the code. Both these additions will be needed for the zoned
allocator.

Also switch the flooring of the frextents counter to 0 in statfs for the
rthinherit case to a manual min_t call to match the handling of the
fdblocks counter for normal file systems.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
This commit is contained in:
parent cc3d2f55c4
commit 712bae9663

16 changed files with 151 additions and 73 deletions
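At its core the patch replaces "which percpu_counter pointer do I pass" with "which enum index do I pass", so a single implementation can serve both existing counters and any future ones. A minimal user-space model of that pattern (the enum values match the patch; everything else here is an illustrative stand-in, with a plain int64_t in place of the percpu counter):

	#include <stdint.h>
	#include <stdio.h>

	enum xfs_free_counter {
		XC_FREE_BLOCKS,		/* free blocks on the data device */
		XC_FREE_RTEXTENTS,	/* free RT extents on the RT device */
		XC_FREE_NR,
	};

	/* model_mount and its members are hypothetical stand-ins only */
	struct model_mount {
		int64_t free[XC_FREE_NR];
	};

	/* one body serves every counter; callers just pick the enum index */
	static void model_add_freecounter(struct model_mount *mp,
			enum xfs_free_counter ctr, uint64_t delta)
	{
		mp->free[ctr] += (int64_t)delta;
	}

	int main(void)
	{
		struct model_mount m = { .free = { 1000, 50 } };

		model_add_freecounter(&m, XC_FREE_BLOCKS, 10);
		model_add_freecounter(&m, XC_FREE_RTEXTENTS, 2);
		printf("blocks=%lld rtextents=%lld\n",
				(long long)m.free[XC_FREE_BLOCKS],
				(long long)m.free[XC_FREE_RTEXTENTS]);
		return 0;
	}
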
@@ -1927,7 +1927,7 @@ xfs_dialloc(
	 * that we can immediately allocate, but then we allow allocation on the
	 * second pass if we fail to find an AG with free inodes in it.
	 */
-	if (percpu_counter_read_positive(&mp->m_fdblocks) <
+	if (xfs_estimate_freecounter(mp, XC_FREE_BLOCKS) <
			mp->m_low_space[XFS_LOWSP_1_PCNT]) {
		ok_alloc = false;
		low_space = true;

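xfs_estimate_freecounter() wraps percpu_counter_read_positive(), which reads only the shared count and ignores in-flight per-CPU deltas: cheap but approximate, which is fine for a heuristic like this low-space check. A user-space sketch of that trade-off (the two-level counter is modeled with a plain array; all names here are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	#define NR_CPUS 4

	/* model of a percpu counter: shared count plus per-cpu slop */
	struct model_counter {
		int64_t count;			/* shared, cheap to read */
		int32_t pcpu[NR_CPUS];		/* per-cpu deltas, costly to fold */
	};

	/* like percpu_counter_read_positive(): fast, ignores the slop */
	static int64_t model_estimate(const struct model_counter *c)
	{
		return c->count > 0 ? c->count : 0;
	}

	/* like percpu_counter_sum(): exact, walks every cpu */
	static int64_t model_sum(const struct model_counter *c)
	{
		int64_t sum = c->count;
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			sum += c->pcpu[cpu];
		return sum;
	}

	int main(void)
	{
		struct model_counter c = {
			.count = 1000, .pcpu = { -30, 5, -12, 8 } };

		printf("estimate=%lld exact=%lld\n",
				(long long)model_estimate(&c),
				(long long)model_sum(&c));
		return 0;
	}
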
@@ -95,7 +95,7 @@ xfs_metafile_resv_can_cover(
	 * There aren't enough blocks left in the inode's reservation, but it
	 * isn't critical unless there also isn't enough free space.
	 */
-	return __percpu_counter_compare(&ip->i_mount->m_fdblocks,
+	return xfs_compare_freecounter(ip->i_mount, XC_FREE_BLOCKS,
			rhs - ip->i_delayed_blks, 2048) >= 0;
 }

@@ -1265,8 +1265,7 @@ xfs_log_sb(
		mp->m_sb.sb_ifree = min_t(uint64_t,
				percpu_counter_sum_positive(&mp->m_ifree),
				mp->m_sb.sb_icount);
-		mp->m_sb.sb_fdblocks =
-				percpu_counter_sum_positive(&mp->m_fdblocks);
+		mp->m_sb.sb_fdblocks = xfs_sum_freecounter(mp, XC_FREE_BLOCKS);
	}

	/*

@@ -1275,9 +1274,10 @@ xfs_log_sb(
	 * we handle nearly-lockless reservations, so we must use the _positive
	 * variant here to avoid writing out nonsense frextents.
	 */
-	if (xfs_has_rtgroups(mp))
+	if (xfs_has_rtgroups(mp)) {
		mp->m_sb.sb_frextents =
-			percpu_counter_sum_positive(&mp->m_frextents);
+			xfs_sum_freecounter(mp, XC_FREE_RTEXTENTS);
+	}

	xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);

@@ -233,6 +233,23 @@ enum xfs_group_type {
	{ XG_TYPE_AG,	"ag" }, \
	{ XG_TYPE_RTG,	"rtg" }

+enum xfs_free_counter {
+	/*
+	 * Number of free blocks on the data device.
+	 */
+	XC_FREE_BLOCKS,
+
+	/*
+	 * Number of free RT extents on the RT device.
+	 */
+	XC_FREE_RTEXTENTS,
+	XC_FREE_NR,
+};
+
+#define XFS_FREECOUNTER_STR \
+	{ XC_FREE_BLOCKS,	"blocks" }, \
+	{ XC_FREE_RTEXTENTS,	"rtextents" }
+
 /*
  * Type verifier functions
  */

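XFS_FREECOUNTER_STR follows the same { value, "name" } pair-list convention as the XG_TYPE table just above it, which the kernel's trace infrastructure consumes via __print_symbolic(). A plain C analogue of that lookup (the enum and table come from the patch; the lookup helper is a hypothetical stand-in for __print_symbolic()):

	#include <stdio.h>

	enum xfs_free_counter { XC_FREE_BLOCKS, XC_FREE_RTEXTENTS, XC_FREE_NR };

	#define XFS_FREECOUNTER_STR \
		{ XC_FREE_BLOCKS,	"blocks" }, \
		{ XC_FREE_RTEXTENTS,	"rtextents" }

	struct sym { int value; const char *name; };

	/* hypothetical stand-in for the trace pretty-printer */
	static const char *freecounter_name(int ctr)
	{
		static const struct sym tab[] = { XFS_FREECOUNTER_STR };

		for (unsigned i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
			if (tab[i].value == ctr)
				return tab[i].name;
		return "?";
	}

	int main(void)
	{
		printf("%s %s\n", freecounter_name(XC_FREE_BLOCKS),
				freecounter_name(XC_FREE_RTEXTENTS));
		return 0;
	}
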
@@ -513,8 +513,8 @@ xchk_fscounters(
	/* Snapshot the percpu counters. */
	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
-	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
-	frextents = percpu_counter_sum(&mp->m_frextents);
+	fdblocks = xfs_sum_freecounter_raw(mp, XC_FREE_BLOCKS);
+	frextents = xfs_sum_freecounter_raw(mp, XC_FREE_RTEXTENTS);

	/* No negative values, please! */
	if (icount < 0 || ifree < 0)

@@ -589,15 +589,16 @@ xchk_fscounters(
			try_again = true;
	}

-	if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
-			fsc->fdblocks)) {
+	if (!xchk_fscount_within_range(sc, fdblocks,
+			&mp->m_free[XC_FREE_BLOCKS].count, fsc->fdblocks)) {
		if (fsc->frozen)
			xchk_set_corrupt(sc);
		else
			try_again = true;
	}

-	if (!xchk_fscount_within_range(sc, frextents, &mp->m_frextents,
+	if (!xchk_fscount_within_range(sc, frextents,
+			&mp->m_free[XC_FREE_RTEXTENTS].count,
			fsc->frextents - fsc->frextents_delayed)) {
		if (fsc->frozen)
			xchk_set_corrupt(sc);

@@ -64,7 +64,7 @@ xrep_fscounters(

	percpu_counter_set(&mp->m_icount, fsc->icount);
	percpu_counter_set(&mp->m_ifree, fsc->ifree);
-	percpu_counter_set(&mp->m_fdblocks, fsc->fdblocks);
+	xfs_set_freecounter(mp, XC_FREE_BLOCKS, fsc->fdblocks);

	/*
	 * Online repair is only supported on v5 file systems, which require

@@ -74,7 +74,7 @@ xrep_fscounters(
	 * track of the delalloc reservations separately, as they are are
	 * subtracted from m_frextents, but not included in sb_frextents.
	 */
-	percpu_counter_set(&mp->m_frextents,
+	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
			fsc->frextents - fsc->frextents_delayed);
	if (!xfs_has_rtgroups(mp))
		mp->m_sb.sb_frextents = fsc->frextents;

@@ -62,7 +62,7 @@ xrep_newbt_estimate_slack(
		free = sc->sa.pag->pagf_freeblks;
		sz = xfs_ag_block_count(sc->mp, pag_agno(sc->sa.pag));
	} else {
-		free = percpu_counter_sum(&sc->mp->m_fdblocks);
+		free = xfs_sum_freecounter_raw(sc->mp, XC_FREE_BLOCKS);
		sz = sc->mp->m_sb.sb_dblocks;
	}

@@ -409,7 +409,7 @@ xfs_reserve_blocks(

	/*
	 * If the request is larger than the current reservation, reserve the
-	 * blocks before we update the reserve counters. Sample m_fdblocks and
+	 * blocks before we update the reserve counters. Sample m_free and
	 * perform a partial reservation if the request exceeds free space.
	 *
	 * The code below estimates how many blocks it can request from

@@ -419,8 +419,8 @@ xfs_reserve_blocks(
	 * space to fill it because mod_fdblocks will refill an undersized
	 * reserve when it can.
	 */
-	free = percpu_counter_sum(&mp->m_fdblocks) -
-						xfs_fdblocks_unavailable(mp);
+	free = xfs_sum_freecounter_raw(mp, XC_FREE_BLOCKS) -
+		xfs_freecounter_unavailable(mp, XC_FREE_BLOCKS);
	delta = request - mp->m_resblks;
	mp->m_resblks = request;
	if (delta > 0 && free > 0) {

@@ -2076,7 +2076,7 @@ xfs_inodegc_want_queue_rt_file(
	if (!XFS_IS_REALTIME_INODE(ip))
		return false;

-	if (__percpu_counter_compare(&mp->m_frextents,
+	if (xfs_compare_freecounter(mp, XC_FREE_RTEXTENTS,
			mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
			XFS_FDBLOCKS_BATCH) < 0)
		return true;

@@ -2104,7 +2104,7 @@ xfs_inodegc_want_queue_work(
	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

-	if (__percpu_counter_compare(&mp->m_fdblocks,
+	if (xfs_compare_freecounter(mp, XC_FREE_BLOCKS,
			mp->m_low_space[XFS_LOWSP_5_PCNT],
			XFS_FDBLOCKS_BATCH) < 0)
		return true;

@@ -1155,9 +1155,9 @@ xfs_ioctl_fs_counts(
	struct xfs_fsop_counts	out = {
		.allocino = percpu_counter_read_positive(&mp->m_icount),
		.freeino = percpu_counter_read_positive(&mp->m_ifree),
-		.freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
-				xfs_fdblocks_unavailable(mp),
-		.freertx = percpu_counter_read_positive(&mp->m_frextents),
+		.freedata = xfs_estimate_freecounter(mp, XC_FREE_BLOCKS) -
+				xfs_freecounter_unavailable(mp, XC_FREE_BLOCKS),
+		.freertx = xfs_estimate_freecounter(mp, XC_FREE_RTEXTENTS),
	};

	if (copy_to_user(uarg, &out, sizeof(out)))

@@ -431,13 +431,14 @@ xfs_quota_calc_throttle(

 static int64_t
 xfs_iomap_freesp(
-	struct percpu_counter	*counter,
+	struct xfs_mount	*mp,
+	unsigned int		idx,
	uint64_t		low_space[XFS_LOWSP_MAX],
	int			*shift)
 {
	int64_t			freesp;

-	freesp = percpu_counter_read_positive(counter);
+	freesp = xfs_estimate_freecounter(mp, idx);
	if (freesp < low_space[XFS_LOWSP_5_PCNT]) {
		*shift = 2;
		if (freesp < low_space[XFS_LOWSP_4_PCNT])

@@ -536,10 +537,10 @@ xfs_iomap_prealloc_size(

	if (unlikely(XFS_IS_REALTIME_INODE(ip)))
		freesp = xfs_rtbxlen_to_blen(mp,
-				xfs_iomap_freesp(&mp->m_frextents,
+				xfs_iomap_freesp(mp, XC_FREE_RTEXTENTS,
					mp->m_low_rtexts, &shift));
	else
-		freesp = xfs_iomap_freesp(&mp->m_fdblocks, mp->m_low_space,
+		freesp = xfs_iomap_freesp(mp, XC_FREE_BLOCKS, mp->m_low_space,
				&shift);

	/*

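xfs_iomap_freesp() turns the remaining free space into a throttling shift: each low-space threshold crossed bumps the shift, and the caller (not shown in this excerpt) scales the speculative preallocation down by that power of two. A user-space model of the thresholding, with hypothetical numbers; the cascade of per-threshold increments is condensed into a loop:

	#include <stdint.h>
	#include <stdio.h>

	enum { LOWSP_5_PCNT, LOWSP_4_PCNT, LOWSP_3_PCNT, LOWSP_2_PCNT,
	       LOWSP_1_PCNT, LOWSP_MAX };

	/* every threshold crossed shrinks prealloc by another factor of two */
	static void model_freesp_shift(int64_t freesp,
			const int64_t low_space[LOWSP_MAX], int *shift)
	{
		*shift = 0;
		if (freesp < low_space[LOWSP_5_PCNT]) {
			*shift = 2;
			for (int lvl = LOWSP_4_PCNT; lvl < LOWSP_MAX; lvl++)
				if (freesp < low_space[lvl])
					(*shift)++;
		}
	}

	int main(void)
	{
		/* hypothetical fs with 1000 blocks: thresholds at 5%..1% */
		const int64_t low[LOWSP_MAX] = { 50, 40, 30, 20, 10 };
		int shift;

		for (int64_t freesp = 60; freesp >= 0; freesp -= 15) {
			model_freesp_shift(freesp, low, &shift);
			printf("free=%lld -> prealloc shrunk by 2^%d\n",
					(long long)freesp, shift);
		}
		return 0;
	}
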
@@ -1220,13 +1220,31 @@ xfs_fs_writable(
	return true;
 }

+/*
+ * Estimate the amount of free space that is not available to userspace and is
+ * not explicitly reserved from the incore fdblocks.  This includes:
+ *
+ * - The minimum number of blocks needed to support splitting a bmap btree
+ * - The blocks currently in use by the freespace btrees because they record
+ *   the actual blocks that will fill per-AG metadata space reservations
+ */
+uint64_t
+xfs_freecounter_unavailable(
+	struct xfs_mount	*mp,
+	enum xfs_free_counter	ctr)
+{
+	if (ctr != XC_FREE_BLOCKS)
+		return 0;
+	return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
+}
+
 void
 xfs_add_freecounter(
	struct xfs_mount	*mp,
-	struct percpu_counter	*counter,
+	enum xfs_free_counter	ctr,
	uint64_t		delta)
 {
-	bool			has_resv_pool = (counter == &mp->m_fdblocks);
+	bool			has_resv_pool = (ctr == XC_FREE_BLOCKS);
	uint64_t		res_used;

	/*

@@ -1234,7 +1252,7 @@ xfs_add_freecounter(
	 * Most of the time the pool is full.
	 */
	if (!has_resv_pool || mp->m_resblks == mp->m_resblks_avail) {
-		percpu_counter_add(counter, delta);
+		percpu_counter_add(&mp->m_free[ctr].count, delta);
		return;
	}

@@ -1245,24 +1263,27 @@ xfs_add_freecounter(
	} else {
		delta -= res_used;
		mp->m_resblks_avail = mp->m_resblks;
-		percpu_counter_add(counter, delta);
+		percpu_counter_add(&mp->m_free[ctr].count, delta);
	}
	spin_unlock(&mp->m_sb_lock);
 }

+
 /* Adjust in-core free blocks or RT extents. */
 int
 xfs_dec_freecounter(
	struct xfs_mount	*mp,
-	struct percpu_counter	*counter,
+	enum xfs_free_counter	ctr,
	uint64_t		delta,
	bool			rsvd)
 {
+	struct percpu_counter	*counter = &mp->m_free[ctr].count;
	uint64_t		set_aside = 0;
	s32			batch;
	bool			has_resv_pool;

-	ASSERT(counter == &mp->m_fdblocks || counter == &mp->m_frextents);
-	has_resv_pool = (counter == &mp->m_fdblocks);
+	ASSERT(ctr < XC_FREE_NR);
+	has_resv_pool = (ctr == XC_FREE_BLOCKS);
	if (rsvd)
		ASSERT(has_resv_pool);

@@ -1292,7 +1313,7 @@ xfs_dec_freecounter(
	 * slightly premature -ENOSPC.
	 */
	if (has_resv_pool)
-		set_aside = xfs_fdblocks_unavailable(mp);
+		set_aside = xfs_freecounter_unavailable(mp, ctr);
	percpu_counter_add_batch(counter, -((int64_t)delta), batch);
	if (__percpu_counter_compare(counter, set_aside,
			XFS_FDBLOCKS_BATCH) < 0) {

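Only XC_FREE_BLOCKS carries the reserve pool today (has_resv_pool); per the commit message, a following patch extends that handling to the RT extent counter. The shape of the add/dec pair: freed blocks first refill an undersized reserve pool, and allocations are checked against the set-aside floor. A condensed single-threaded model of that arithmetic (locking, percpu batching, and the exact -ENOSPC reserve-dip path are elided; all names are hypothetical):

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	/* single-threaded model of one freecounter with a reserve pool */
	struct model_ctr {
		int64_t count;		/* free units visible to allocations */
		uint64_t resblks;	/* reserve pool size */
		uint64_t resblks_avail;	/* reserve pool fill level */
		uint64_t set_aside;	/* never-allocatable floor */
	};

	static void model_add(struct model_ctr *c, uint64_t delta)
	{
		uint64_t res_used = c->resblks - c->resblks_avail;

		if (res_used == 0) {		/* pool full: all goes free */
			c->count += (int64_t)delta;
		} else if (delta <= res_used) {	/* refill the pool first */
			c->resblks_avail += delta;
		} else {			/* top up pool, rest goes free */
			c->resblks_avail = c->resblks;
			c->count += (int64_t)(delta - res_used);
		}
	}

	static int model_dec(struct model_ctr *c, uint64_t delta)
	{
		c->count -= (int64_t)delta;
		if (c->count < (int64_t)c->set_aside) {	/* below floor: undo */
			c->count += (int64_t)delta;
			return -ENOSPC;
		}
		return 0;
	}

	int main(void)
	{
		struct model_ctr c = { .count = 100, .resblks = 16,
				       .resblks_avail = 8, .set_aside = 4 };

		/* 100 - 98 = 2 would fall below set_aside: fails */
		printf("dec 98: %d\n", model_dec(&c, 98));
		model_add(&c, 20);	/* 8 refill the pool, 12 reach free */
		printf("count=%lld avail=%llu\n", (long long)c.count,
				(unsigned long long)c.resblks_avail);
		return 0;
	}
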
@@ -105,6 +105,11 @@ struct xfs_groups {
	uint64_t		blkmask;
 };

+struct xfs_freecounter {
+	/* free blocks for general use: */
+	struct percpu_counter	count;
+};
+
 /*
  * The struct xfsmount layout is optimised to separate read-mostly variables
  * from variables that are frequently modified. We put the read-mostly variables

@@ -222,8 +227,8 @@ typedef struct xfs_mount {
	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
-	struct percpu_counter	m_fdblocks;	/* free block counter */
-	struct percpu_counter	m_frextents;	/* free rt extent counter */
+
+	struct xfs_freecounter	m_free[XC_FREE_NR];

	/*
	 * Count of data device blocks reserved for delayed allocations,

@@ -646,45 +651,74 @@ extern void xfs_unmountfs(xfs_mount_t *);
  */
 #define XFS_FDBLOCKS_BATCH	1024

+uint64_t xfs_freecounter_unavailable(struct xfs_mount *mp,
+		enum xfs_free_counter ctr);
+
 /*
- * Estimate the amount of free space that is not available to userspace and is
- * not explicitly reserved from the incore fdblocks.  This includes:
- *
- * - The minimum number of blocks needed to support splitting a bmap btree
- * - The blocks currently in use by the freespace btrees because they record
- *   the actual blocks that will fill per-AG metadata space reservations
+ * Sum up the freecount, but never return negative values.
  */
-static inline uint64_t
-xfs_fdblocks_unavailable(
-	struct xfs_mount	*mp)
+static inline s64 xfs_sum_freecounter(struct xfs_mount *mp,
+		enum xfs_free_counter ctr)
 {
-	return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
+	return percpu_counter_sum_positive(&mp->m_free[ctr].count);
 }

-int xfs_dec_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
+/*
+ * Same as above, but does return negative values.  Mostly useful for
+ * special cases like repair and tracing.
+ */
+static inline s64 xfs_sum_freecounter_raw(struct xfs_mount *mp,
+		enum xfs_free_counter ctr)
+{
+	return percpu_counter_sum(&mp->m_free[ctr].count);
+}
+
+/*
+ * This just provides and estimate without the cpu-local updates, use
+ * xfs_sum_freecounter for the exact value.
+ */
+static inline s64 xfs_estimate_freecounter(struct xfs_mount *mp,
+		enum xfs_free_counter ctr)
+{
+	return percpu_counter_read_positive(&mp->m_free[ctr].count);
+}
+
+static inline int xfs_compare_freecounter(struct xfs_mount *mp,
+		enum xfs_free_counter ctr, s64 rhs, s32 batch)
+{
+	return __percpu_counter_compare(&mp->m_free[ctr].count, rhs, batch);
+}
+
+static inline void xfs_set_freecounter(struct xfs_mount *mp,
+		enum xfs_free_counter ctr, uint64_t val)
+{
+	percpu_counter_set(&mp->m_free[ctr].count, val);
+}
+
+int xfs_dec_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
		uint64_t delta, bool rsvd);
-void xfs_add_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
+void xfs_add_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
		uint64_t delta);

 static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
		bool reserved)
 {
-	return xfs_dec_freecounter(mp, &mp->m_fdblocks, delta, reserved);
+	return xfs_dec_freecounter(mp, XC_FREE_BLOCKS, delta, reserved);
 }

 static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
 {
-	xfs_add_freecounter(mp, &mp->m_fdblocks, delta);
+	xfs_add_freecounter(mp, XC_FREE_BLOCKS, delta);
 }

 static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
 {
-	return xfs_dec_freecounter(mp, &mp->m_frextents, delta, false);
+	return xfs_dec_freecounter(mp, XC_FREE_RTEXTENTS, delta, false);
 }

 static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
 {
-	xfs_add_freecounter(mp, &mp->m_frextents, delta);
+	xfs_add_freecounter(mp, XC_FREE_RTEXTENTS, delta);
 }

 extern int xfs_readsb(xfs_mount_t *, int);

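Taken together, the header now gives call sites a small, uniform surface. A hypothetical caller (sketch only, not part of the patch) reserving and releasing data blocks would go through the typed helpers rather than touching a percpu counter directly:

	/* hypothetical kernel-style caller, for illustration only */
	int example_reserve(struct xfs_mount *mp, uint64_t len)
	{
		int error;

		/* take len blocks, allowing a dip into the reserve pool */
		error = xfs_dec_fdblocks(mp, len, true);
		if (error)
			return error;	/* -ENOSPC */

		/* ... use the space; when done (or on failure) give it back */
		xfs_add_fdblocks(mp, len);
		return 0;
	}
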
@@ -1519,7 +1519,7 @@ xfs_rtalloc_reinit_frextents(
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_frextents = val;
	spin_unlock(&mp->m_sb_lock);
-	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
+	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS, mp->m_sb.sb_frextents);
	return 0;
 }

@@ -834,10 +834,12 @@ xfs_statfs_data(
	struct kstatfs		*st)
 {
	int64_t			fdblocks =
-		percpu_counter_sum(&mp->m_fdblocks);
+		xfs_sum_freecounter(mp, XC_FREE_BLOCKS);

	/* make sure st->f_bfree does not underflow */
-	st->f_bfree = max(0LL, fdblocks - xfs_fdblocks_unavailable(mp));
+	st->f_bfree = max(0LL,
+		fdblocks - xfs_freecounter_unavailable(mp, XC_FREE_BLOCKS));

	/*
	 * sb_dblocks can change during growfs, but nothing cares about reporting
	 * the old or new value during growfs.

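fdblocks is kept as an int64_t here because the summed counter can legitimately fall below what xfs_freecounter_unavailable() sets aside, while st->f_bfree is unsigned, so an unchecked subtraction would wrap to a huge bogus value; max(0LL, ...) keeps the math signed until the floor is applied. A standalone illustration of the hazard, with hypothetical numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t fdblocks = 100;		/* summed free counter */
		int64_t unavailable = 130;	/* set-aside exceeds it */
		uint64_t bfree;

		/* naive unsigned subtraction wraps around */
		bfree = (uint64_t)fdblocks - (uint64_t)unavailable;
		printf("wrapped: %llu\n", (unsigned long long)bfree);

		/* signed math floored at zero, as in xfs_statfs_data() */
		int64_t floored = fdblocks - unavailable;
		bfree = floored > 0 ? (uint64_t)floored : 0;
		printf("floored: %llu\n", (unsigned long long)bfree);
		return 0;
	}
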
@@ -856,7 +858,7 @@ xfs_statfs_rt(
	struct kstatfs		*st)
 {
	st->f_bfree = xfs_rtbxlen_to_blen(mp,
-			percpu_counter_sum_positive(&mp->m_frextents));
+			xfs_sum_freecounter(mp, XC_FREE_RTEXTENTS));
	st->f_blocks = mp->m_sb.sb_rblocks;
 }

@@ -1065,7 +1067,8 @@ static int
 xfs_init_percpu_counters(
	struct xfs_mount	*mp)
 {
	int			error;
+	int			i;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)

@@ -1075,30 +1078,29 @@ xfs_init_percpu_counters(
	if (error)
		goto free_icount;

-	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
-	if (error)
-		goto free_ifree;
-
	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
-		goto free_fdblocks;
+		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_rtextents, 0, GFP_KERNEL);
	if (error)
		goto free_delalloc;

-	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
-	if (error)
-		goto free_delalloc_rt;
+	for (i = 0; i < XC_FREE_NR; i++) {
+		error = percpu_counter_init(&mp->m_free[i].count, 0,
+				GFP_KERNEL);
+		if (error)
+			goto free_freecounters;
+	}

	return 0;

-free_delalloc_rt:
+free_freecounters:
+	while (--i > 0)
+		percpu_counter_destroy(&mp->m_free[i].count);
	percpu_counter_destroy(&mp->m_delalloc_rtextents);
 free_delalloc:
	percpu_counter_destroy(&mp->m_delalloc_blks);
-free_fdblocks:
-	percpu_counter_destroy(&mp->m_fdblocks);
 free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
 free_icount:

@@ -1112,24 +1114,26 @@ xfs_reinit_percpu_counters(
 {
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
-	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
-	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
+	xfs_set_freecounter(mp, XC_FREE_BLOCKS, mp->m_sb.sb_fdblocks);
+	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS, mp->m_sb.sb_frextents);
 }

 static void
 xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
 {
+	enum xfs_free_counter	i;
+
+	for (i = 0; i < XC_FREE_NR; i++)
+		percpu_counter_destroy(&mp->m_free[i].count);
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
-	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_rtextents) == 0);
	percpu_counter_destroy(&mp->m_delalloc_rtextents);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
-	percpu_counter_destroy(&mp->m_frextents);
 }

 static int

@@ -5622,7 +5622,7 @@ DECLARE_EVENT_CLASS(xfs_metafile_resv_class,

		__entry->dev = mp->m_super->s_dev;
		__entry->ino = ip->i_ino;
-		__entry->freeblks = percpu_counter_sum(&mp->m_fdblocks);
+		__entry->freeblks = xfs_sum_freecounter_raw(mp, XC_FREE_BLOCKS);
		__entry->reserved = ip->i_delayed_blks;
		__entry->asked = ip->i_meta_resv_asked;
		__entry->used = ip->i_nblocks;