Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
xfs: implement zoned garbage collection
RT groups on a zoned file system need to be completely empty before their space can be reused. This means that partially empty groups need to be emptied entirely to free up space if no entirely free groups are available.

Add a garbage collection thread that moves all data out of the least used zone when not enough free zones are available, and that resets all zones that have been emptied. To find zones to empty, a simple set of 10 buckets based on the amount of space used in each zone is kept. To empty a zone, the rmap is walked to find the owners, and the data is read and then written to its new place.

To automatically defragment files, the rmap records are sorted by inode and logical offset. This means that defragmentation of parallel writes into a single zone happens automatically when performing garbage collection.

Because holding the iolock over the entire GC cycle would inject very noticeable latency for other accesses to the inodes, the iolock is not taken while performing I/O. Instead, the I/O completion handler checks that the mapping hasn't changed from the one recorded at the start of the GC cycle, and does not update the mapping if it has changed.

Co-developed-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
This commit is contained in:
parent 0bb2193056
commit 080d01c41d

13 changed files with 1425 additions and 5 deletions
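As a rough illustration of the bucket-driven victim selection described in the commit message, here is a small stand-alone C model. It is only a sketch of the policy, not the kernel implementation; the names (zone_model, bucket_of, pick_victim) and the user-space framing are invented, while the fixed 10-bucket split and the scan from the least-used bucket upward follow the description above.

/*
 * Illustrative model of bucket-based GC victim selection: zones are
 * binned into 10 buckets by fraction of used blocks, and GC scans from
 * the least-used bucket upward.  Hypothetical user-space sketch.
 */
#include <stdbool.h>
#include <stdint.h>

#define NR_BUCKETS	10u

struct zone_model {
	uint32_t	used_blocks;	/* blocks still referenced */
	uint32_t	total_blocks;	/* zone capacity */
	bool		reclaimable;	/* fully written, has freed space */
};

static uint32_t bucket_of(const struct zone_model *z)
{
	/* 0 => almost empty, 9 => almost full */
	return NR_BUCKETS * z->used_blocks / z->total_blocks;
}

/* Return the index of the emptiest reclaimable zone, or -1 if none. */
static int pick_victim(const struct zone_model *zones, int nr_zones)
{
	for (uint32_t b = 0; b < NR_BUCKETS; b++)
		for (int i = 0; i < nr_zones; i++)
			if (zones[i].reclaimable && bucket_of(&zones[i]) == b)
				return i;
	return -1;
}

Picking from the lowest occupied bucket minimizes the amount of still-live data that has to be rewritten per zone reclaimed, which is the classic write-amplification argument for greedy zone GC.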
fs/xfs/Makefile

@@ -139,6 +139,7 @@ xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \
 # xfs_rtbitmap is shared with libxfs
 xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o \
 	xfs_zone_alloc.o \
+	xfs_zone_gc.o \
 	xfs_zone_space_resv.o

 xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
fs/xfs/libxfs/xfs_group.h

@@ -19,10 +19,23 @@ struct xfs_group {
 #ifdef __KERNEL__
 	/* -- kernel only structures below this line -- */

-	/*
-	 * Track freed but not yet committed extents.
-	 */
-	struct xfs_extent_busy_tree *xg_busy_extents;
+	union {
+		/*
+		 * For perags and non-zoned RT groups:
+		 * Track freed but not yet committed extents.
+		 */
+		struct xfs_extent_busy_tree *xg_busy_extents;
+
+		/*
+		 * For zoned RT groups:
+		 * List of groups that need a zone reset.
+		 *
+		 * The zonegc code forces a log flush of the rtrmap inode before
+		 * resetting the write pointer, so there is no need for
+		 * individual busy extent tracking.
+		 */
+		struct xfs_group *xg_next_reset;
+	};

 	/*
 	 * Bitsets of per-ag metadata that have been checked and/or are sick.
fs/xfs/libxfs/xfs_rtgroup.h

@@ -58,6 +58,12 @@ struct xfs_rtgroup {
  */
 #define XFS_RTG_FREE	XA_MARK_0

+/*
+ * For zoned RT devices this is set on groups that are fully written and that
+ * have unused blocks. Used by the garbage collection to pick targets.
+ */
+#define XFS_RTG_RECLAIMABLE	XA_MARK_1
+
 static inline struct xfs_rtgroup *to_rtg(struct xfs_group *xg)
 {
 	return container_of(xg, struct xfs_rtgroup, rtg_group);
fs/xfs/xfs_extent_busy.c

@@ -671,7 +671,7 @@ xfs_extent_busy_wait_all(
 	while ((pag = xfs_perag_next(mp, pag)))
 		xfs_extent_busy_wait_group(pag_group(pag));

-	if (xfs_has_rtgroups(mp))
+	if (xfs_has_rtgroups(mp) && !xfs_has_zoned(mp))
 		while ((rtg = xfs_rtgroup_next(mp, rtg)))
 			xfs_extent_busy_wait_group(rtg_group(rtg));
 }
fs/xfs/xfs_mount.c

@@ -1090,6 +1090,8 @@ xfs_mountfs(
 		error = xfs_fs_reserve_ag_blocks(mp);
 		if (error && error != -ENOSPC)
 			goto out_agresv;
+
+		xfs_zone_gc_start(mp);
 	}

 	return 0;

@@ -1178,6 +1180,8 @@ xfs_unmountfs(
 	xfs_inodegc_flush(mp);

 	xfs_blockgc_stop(mp);
+	if (!test_bit(XFS_OPSTATE_READONLY, &mp->m_opstate))
+		xfs_zone_gc_stop(mp);
 	xfs_fs_unreserve_ag_blocks(mp);
 	xfs_qm_unmount_quotas(mp);
 	if (xfs_has_zoned(mp))
fs/xfs/xfs_mount.h

@@ -556,6 +556,8 @@ __XFS_HAS_FEAT(nouuid, NOUUID)
 #define XFS_OPSTATE_RESUMING_QUOTAON	18
 /* Kernel has logged a warning about zoned RT device being used on this fs. */
 #define XFS_OPSTATE_WARNED_ZONED	19
+/* (Zoned) GC is in progress */
+#define XFS_OPSTATE_ZONEGC_RUNNING	20

 #define __XFS_IS_OPSTATE(name, NAME) \
 static inline bool xfs_is_ ## name (struct xfs_mount *mp) \

@@ -600,6 +602,7 @@ static inline bool xfs_clear_resuming_quotaon(struct xfs_mount *mp)
 #endif /* CONFIG_XFS_QUOTA */
 __XFS_IS_OPSTATE(done_with_log_incompat, UNSET_LOG_INCOMPAT)
 __XFS_IS_OPSTATE(using_logged_xattrs, USE_LARP)
+__XFS_IS_OPSTATE(zonegc_running, ZONEGC_RUNNING)

 static inline bool
 xfs_should_warn(struct xfs_mount *mp, long nr)
fs/xfs/xfs_super.c

@@ -46,6 +46,7 @@
 #include "xfs_exchmaps_item.h"
 #include "xfs_parent.h"
 #include "xfs_rtalloc.h"
+#include "xfs_zone_alloc.h"
 #include "scrub/stats.h"
 #include "scrub/rcbag_btree.h"

@@ -822,6 +823,7 @@ xfs_fs_sync_fs(
 	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
 		xfs_inodegc_stop(mp);
 		xfs_blockgc_stop(mp);
+		xfs_zone_gc_stop(mp);
 	}

 	return 0;

@@ -994,6 +996,7 @@ xfs_fs_freeze(
 	if (ret && !xfs_is_readonly(mp)) {
 		xfs_blockgc_start(mp);
 		xfs_inodegc_start(mp);
+		xfs_zone_gc_start(mp);
 	}

 	return ret;

@@ -1015,6 +1018,7 @@ xfs_fs_unfreeze(
 	 * filesystem.
 	 */
 	if (!xfs_is_readonly(mp)) {
+		xfs_zone_gc_start(mp);
 		xfs_blockgc_start(mp);
 		xfs_inodegc_start(mp);
 	}

@@ -1948,6 +1952,9 @@ xfs_remount_rw(
 	/* Re-enable the background inode inactivation worker. */
 	xfs_inodegc_start(mp);

+	/* Restart zone reclaim */
+	xfs_zone_gc_start(mp);
+
 	return 0;
 }

@@ -1992,6 +1999,9 @@ xfs_remount_ro(
 	 */
 	xfs_inodegc_stop(mp);

+	/* Stop zone reclaim */
+	xfs_zone_gc_stop(mp);
+
 	/* Free the per-AG metadata reservation pool. */
 	xfs_fs_unreserve_ag_blocks(mp);

fs/xfs/xfs_trace.h

@@ -295,8 +295,11 @@ DECLARE_EVENT_CLASS(xfs_zone_class,
 DEFINE_EVENT(xfs_zone_class, name, \
 	TP_PROTO(struct xfs_rtgroup *rtg), \
 	TP_ARGS(rtg))
+DEFINE_ZONE_EVENT(xfs_zone_emptied);
 DEFINE_ZONE_EVENT(xfs_zone_full);
 DEFINE_ZONE_EVENT(xfs_zone_opened);
+DEFINE_ZONE_EVENT(xfs_zone_reset);
+DEFINE_ZONE_EVENT(xfs_zone_gc_target_opened);

 TRACE_EVENT(xfs_zone_free_blocks,
 	TP_PROTO(struct xfs_rtgroup *rtg, xfs_rgblock_t rgbno,

@@ -364,6 +367,28 @@ DEFINE_EVENT(xfs_zone_alloc_class, name, \
 DEFINE_ZONE_ALLOC_EVENT(xfs_zone_record_blocks);
 DEFINE_ZONE_ALLOC_EVENT(xfs_zone_alloc_blocks);

+TRACE_EVENT(xfs_zone_gc_select_victim,
+	TP_PROTO(struct xfs_rtgroup *rtg, unsigned int bucket),
+	TP_ARGS(rtg, bucket),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_rgnumber_t, rgno)
+		__field(xfs_rgblock_t, used)
+		__field(unsigned int, bucket)
+	),
+	TP_fast_assign(
+		__entry->dev = rtg_mount(rtg)->m_super->s_dev;
+		__entry->rgno = rtg_rgno(rtg);
+		__entry->used = rtg_rmap(rtg)->i_used_blocks;
+		__entry->bucket = bucket;
+	),
+	TP_printk("dev %d:%d rgno 0x%x used 0x%x bucket %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rgno,
+		  __entry->used,
+		  __entry->bucket)
+);
+
 TRACE_EVENT(xfs_zones_mount,
 	TP_PROTO(struct xfs_mount *mp),
 	TP_ARGS(mp),
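For reference, the TP_printk() format in xfs_zone_gc_select_victim above produces trace lines of roughly this shape (the field values here are invented):

	xfs_zone_gc_select_victim: dev 253:0 rgno 0x12 used 0x3c00 bucket 3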
fs/xfs/xfs_zone_alloc.c

@@ -35,6 +35,104 @@ xfs_open_zone_put(
 	}
 }

+static inline uint32_t
+xfs_zone_bucket(
+	struct xfs_mount	*mp,
+	uint32_t		used_blocks)
+{
+	return XFS_ZONE_USED_BUCKETS * used_blocks /
+		mp->m_groups[XG_TYPE_RTG].blocks;
+}
+
+static inline void
+xfs_zone_add_to_bucket(
+	struct xfs_zone_info	*zi,
+	xfs_rgnumber_t		rgno,
+	uint32_t		to_bucket)
+{
+	__set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]);
+	zi->zi_used_bucket_entries[to_bucket]++;
+}
+
+static inline void
+xfs_zone_remove_from_bucket(
+	struct xfs_zone_info	*zi,
+	xfs_rgnumber_t		rgno,
+	uint32_t		from_bucket)
+{
+	__clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]);
+	zi->zi_used_bucket_entries[from_bucket]--;
+}
+
+static void
+xfs_zone_account_reclaimable(
+	struct xfs_rtgroup	*rtg,
+	uint32_t		freed)
+{
+	struct xfs_group	*xg = &rtg->rtg_group;
+	struct xfs_mount	*mp = rtg_mount(rtg);
+	struct xfs_zone_info	*zi = mp->m_zone_info;
+	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
+	xfs_rgnumber_t		rgno = rtg_rgno(rtg);
+	uint32_t		from_bucket = xfs_zone_bucket(mp, used + freed);
+	uint32_t		to_bucket = xfs_zone_bucket(mp, used);
+	bool			was_full = (used + freed == rtg_blocks(rtg));
+
+	/*
+	 * This can be called from log recovery, where the zone_info structure
+	 * hasn't been allocated yet. Skip all work as xfs_mount_zones will
+	 * add the zones to the right buckets before the file systems becomes
+	 * active.
+	 */
+	if (!zi)
+		return;
+
+	if (!used) {
+		/*
+		 * The zone is now empty, remove it from the bottom bucket and
+		 * trigger a reset.
+		 */
+		trace_xfs_zone_emptied(rtg);
+
+		if (!was_full)
+			xfs_group_clear_mark(xg, XFS_RTG_RECLAIMABLE);
+
+		spin_lock(&zi->zi_used_buckets_lock);
+		if (!was_full)
+			xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
+		spin_unlock(&zi->zi_used_buckets_lock);
+
+		spin_lock(&zi->zi_reset_list_lock);
+		xg->xg_next_reset = zi->zi_reset_list;
+		zi->zi_reset_list = xg;
+		spin_unlock(&zi->zi_reset_list_lock);
+
+		if (zi->zi_gc_thread)
+			wake_up_process(zi->zi_gc_thread);
+	} else if (was_full) {
+		/*
+		 * The zone transitioned from full, mark it up as reclaimable
+		 * and wake up GC which might be waiting for zones to reclaim.
+		 */
+		spin_lock(&zi->zi_used_buckets_lock);
+		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
+		spin_unlock(&zi->zi_used_buckets_lock);
+
+		xfs_group_set_mark(xg, XFS_RTG_RECLAIMABLE);
+		if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
+			wake_up_process(zi->zi_gc_thread);
+	} else if (to_bucket != from_bucket) {
+		/*
+		 * Move the zone to a new bucket if it dropped below the
+		 * threshold.
+		 */
+		spin_lock(&zi->zi_used_buckets_lock);
+		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
+		xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
+		spin_unlock(&zi->zi_used_buckets_lock);
+	}
+}
+
 static void
 xfs_open_zone_mark_full(
 	struct xfs_open_zone *oz)
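To make the arithmetic in xfs_zone_bucket() above concrete, here is a small stand-alone check; the 1000-block zone size is an invented example value, and XFS_ZONE_USED_BUCKETS is the constant defined in xfs_zone_priv.h further down.

/*
 * Worked example of the bucket index computation, as a stand-alone
 * program.  The zone size is made up for illustration.
 */
#include <assert.h>
#include <stdint.h>

#define XFS_ZONE_USED_BUCKETS	10u

static uint32_t zone_bucket(uint32_t used_blocks, uint32_t rtg_blocks)
{
	return XFS_ZONE_USED_BUCKETS * used_blocks / rtg_blocks;
}

int main(void)
{
	assert(zone_bucket(73, 1000) == 0);	/* nearly empty zone */
	assert(zone_bucket(850, 1000) == 8);	/* mostly used zone */
	/*
	 * A fully used zone would compute to bucket 10, one past the
	 * last valid bucket index.
	 */
	assert(zone_bucket(1000, 1000) == XFS_ZONE_USED_BUCKETS);
	return 0;
}

The out-of-range result for a fully used zone matches the accounting above: xfs_zone_account_reclaimable() only inserts a zone into a bucket once it has stopped being full (the was_full path), and removes it from the buckets entirely once used reaches zero.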
@@ -42,6 +140,7 @@ xfs_open_zone_mark_full(
 	struct xfs_rtgroup	*rtg = oz->oz_rtg;
 	struct xfs_mount	*mp = rtg_mount(rtg);
 	struct xfs_zone_info	*zi = mp->m_zone_info;
+	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;

 	trace_xfs_zone_full(rtg);

@@ -59,6 +158,8 @@ xfs_open_zone_mark_full(
 	xfs_open_zone_put(oz);

 	wake_up_all(&zi->zi_zone_wait);
+	if (used < rtg_blocks(rtg))
+		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
 }

 static void
@@ -244,6 +345,13 @@ xfs_zone_free_blocks(
 	trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len);

 	rmapip->i_used_blocks -= len;
+	/*
+	 * Don't add open zones to the reclaimable buckets. The I/O completion
+	 * for writing the last block will take care of accounting for already
+	 * unused blocks instead.
+	 */
+	if (!READ_ONCE(rtg->rtg_open_zone))
+		xfs_zone_account_reclaimable(rtg, len);
 	xfs_add_frextents(mp, len);
 	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
 	return 0;
@@ -395,6 +503,9 @@ xfs_try_open_zone(
 	 */
 	wake_up_all(&zi->zi_zone_wait);

+	if (xfs_zoned_need_gc(mp))
+		wake_up_process(zi->zi_gc_thread);
+
 	trace_xfs_zone_opened(oz->oz_rtg);
 	return oz;
 }
@@ -702,6 +813,7 @@ xfs_init_zone(
 	struct xfs_zone_info	*zi = mp->m_zone_info;
 	uint64_t		used = rtg_rmap(rtg)->i_used_blocks;
 	xfs_rgblock_t		write_pointer, highest_rgbno;
+	int			error;

 	if (zone && !xfs_zone_validate(zone, rtg, &write_pointer))
 		return -EFSCORRUPTED;

@@ -728,6 +840,18 @@ xfs_init_zone(
 		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
 	}

+	/*
+	 * If there are no used blocks, but the zone is not in empty state yet
+	 * we lost power before the zoned reset. In that case finish the work
+	 * here.
+	 */
+	if (write_pointer == rtg_blocks(rtg) && used == 0) {
+		error = xfs_zone_gc_reset_sync(rtg);
+		if (error)
+			return error;
+		write_pointer = 0;
+	}
+
 	if (write_pointer == 0) {
 		/* zone is empty */
 		atomic_inc(&zi->zi_nr_free_zones);

@@ -746,6 +870,7 @@ xfs_init_zone(
 		iz->reclaimable += write_pointer - used;
 	} else if (used < rtg_blocks(rtg)) {
 		/* zone fully written, but has freed blocks */
+		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
 		iz->reclaimable += (rtg_blocks(rtg) - used);
 	}
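The mount-time cases xfs_init_zone() distinguishes above can be summarized as a small classification. This is a hedged stand-alone model derived only from the hunks shown here; the enum and function names are hypothetical, and the open-zone bookkeeping the real function does is elided.

/*
 * Sketch of the per-zone classification at mount time: a zone is
 * described by its write pointer and the used-block count from the
 * rmap inode.  Hypothetical model, not kernel code.
 */
#include <stdint.h>

enum zone_state {
	ZONE_FREE,		/* write pointer at zero, nothing written */
	ZONE_NEEDS_RESET,	/* fully written, no used blocks left */
	ZONE_OPEN,		/* partially written, still being filled */
	ZONE_RECLAIMABLE,	/* fully written, some blocks freed */
	ZONE_FULL,		/* fully written and fully used */
};

static enum zone_state classify_zone(uint32_t write_pointer,
		uint32_t used, uint32_t rtg_blocks)
{
	if (write_pointer == rtg_blocks && used == 0)
		return ZONE_NEEDS_RESET;	/* lost power before the reset */
	if (write_pointer == 0)
		return ZONE_FREE;
	if (write_pointer < rtg_blocks)
		return ZONE_OPEN;
	return used < rtg_blocks ? ZONE_RECLAIMABLE : ZONE_FULL;
}

The ZONE_NEEDS_RESET case is handled synchronously via xfs_zone_gc_reset_sync() so that a zone emptied just before a crash becomes usable again at the next mount.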
@@ -856,11 +981,20 @@ xfs_calc_open_zones(
 	return 0;
 }

+static unsigned long *
+xfs_alloc_bucket_bitmap(
+	struct xfs_mount	*mp)
+{
+	return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount),
+			sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
+}
+
 static struct xfs_zone_info *
 xfs_alloc_zone_info(
 	struct xfs_mount	*mp)
 {
 	struct xfs_zone_info	*zi;
+	int			i;

 	zi = kzalloc(sizeof(*zi), GFP_KERNEL);
 	if (!zi)

@@ -871,14 +1005,30 @@ xfs_alloc_zone_info(
 	spin_lock_init(&zi->zi_open_zones_lock);
 	spin_lock_init(&zi->zi_reservation_lock);
 	init_waitqueue_head(&zi->zi_zone_wait);
+	spin_lock_init(&zi->zi_used_buckets_lock);
+	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
+		zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp);
+		if (!zi->zi_used_bucket_bitmap[i])
+			goto out_free_bitmaps;
+	}
 	return zi;
+
+out_free_bitmaps:
+	while (--i > 0)
+		kvfree(zi->zi_used_bucket_bitmap[i]);
+	kfree(zi);
+	return NULL;
 }

 static void
 xfs_free_zone_info(
 	struct xfs_zone_info	*zi)
 {
+	int			i;
+
 	xfs_free_open_zones(zi);
+	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++)
+		kvfree(zi->zi_used_bucket_bitmap[i]);
 	kfree(zi);
 }
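For a sense of scale of the per-bucket bitmaps allocated above (one bit per RT group per bucket, ten buckets in total), here is a quick stand-alone calculation; the group count is a made-up example value.

/*
 * Back-of-the-envelope sizing for one bucket bitmap, mirroring the
 * kvmalloc_array(BITS_TO_LONGS(...), sizeof(unsigned long)) call above.
 */
#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int rgcount = 12345;	/* hypothetical sb_rgcount */

	/* 12345 groups -> 1544 bytes per bucket on a 64-bit system */
	printf("bytes per bucket bitmap: %zu\n",
			BITS_TO_LONGS(rgcount) * sizeof(unsigned long));
	return 0;
}

Even for very large RT group counts the ten bitmaps stay small, which is why they can simply be allocated up front at mount time.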
@@ -943,6 +1093,10 @@ xfs_mount_zones(
 	xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
 	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
 			iz.available + iz.reclaimable);
+
+	error = xfs_zone_gc_mount(mp);
+	if (error)
+		goto out_free_zone_info;
 	return 0;

 out_free_zone_info:

@@ -954,5 +1108,6 @@ void
 xfs_unmount_zones(
 	struct xfs_mount	*mp)
 {
+	xfs_zone_gc_unmount(mp);
 	xfs_free_zone_info(mp->m_zone_info);
 }
fs/xfs/xfs_zone_alloc.h

@@ -48,6 +48,8 @@ uint64_t xfs_zoned_default_resblks(struct xfs_mount *mp,
 #ifdef CONFIG_XFS_RT
 int xfs_mount_zones(struct xfs_mount *mp);
 void xfs_unmount_zones(struct xfs_mount *mp);
+void xfs_zone_gc_start(struct xfs_mount *mp);
+void xfs_zone_gc_stop(struct xfs_mount *mp);
 #else
 static inline int xfs_mount_zones(struct xfs_mount *mp)
 {

@@ -56,6 +58,12 @@ static inline int xfs_mount_zones(struct xfs_mount *mp)
 static inline void xfs_unmount_zones(struct xfs_mount *mp)
 {
 }
+static inline void xfs_zone_gc_start(struct xfs_mount *mp)
+{
+}
+static inline void xfs_zone_gc_stop(struct xfs_mount *mp)
+{
+}
 #endif /* CONFIG_XFS_RT */

 #endif /* _XFS_ZONE_ALLOC_H */
fs/xfs/xfs_zone_gc.c (new file, 1165 lines added)

File diff suppressed because it is too large.
fs/xfs/xfs_zone_priv.h

@@ -40,6 +40,13 @@ struct xfs_open_zone {
 	struct xfs_rtgroup	*oz_rtg;
 };

+/*
+ * Number of bitmap buckets to track reclaimable zones. There are 10 buckets
+ * so that each 10% of the usable capacity get their own bucket and GC can
+ * only has to walk the bitmaps of the lesser used zones if there are any.
+ */
+#define XFS_ZONE_USED_BUCKETS	10u
+
 struct xfs_zone_info {
 	/*
 	 * List of pending space reservations:

@@ -82,10 +89,24 @@ struct xfs_zone_info {
 	 */
 	spinlock_t		zi_reset_list_lock;
 	struct xfs_group	*zi_reset_list;
+
+	/*
+	 * A set of bitmaps to bucket-sort reclaimable zones by used blocks to help
+	 * garbage collection to quickly find the best candidate for reclaim.
+	 */
+	spinlock_t		zi_used_buckets_lock;
+	unsigned int		zi_used_bucket_entries[XFS_ZONE_USED_BUCKETS];
+	unsigned long		*zi_used_bucket_bitmap[XFS_ZONE_USED_BUCKETS];
+
 };

 struct xfs_open_zone *xfs_open_zone(struct xfs_mount *mp, bool is_gc);

+int xfs_zone_gc_reset_sync(struct xfs_rtgroup *rtg);
+bool xfs_zoned_need_gc(struct xfs_mount *mp);
+int xfs_zone_gc_mount(struct xfs_mount *mp);
+void xfs_zone_gc_unmount(struct xfs_mount *mp);
+
 void xfs_zoned_resv_wake_all(struct xfs_mount *mp);

 #endif /* _XFS_ZONE_PRIV_H */
fs/xfs/xfs_zone_space_resv.c

@@ -159,6 +159,15 @@ xfs_zoned_reserve_available(
 		if (error != -ENOSPC)
 			break;

+		/*
+		 * If there is no reclaimable group left and we aren't still
+		 * processing a pending GC request give up as we're fully out
+		 * of space.
+		 */
+		if (!xfs_group_marked(mp, XG_TYPE_RTG, XFS_RTG_RECLAIMABLE) &&
+		    !xfs_is_zonegc_running(mp))
+			break;
+
 		spin_unlock(&zi->zi_reservation_lock);
 		schedule();
 		spin_lock(&zi->zi_reservation_lock);