Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2025-08-05 16:54:27 +00:00
bcachefs: Rename enum alloc_reserve -> bch_watermark
This is prep work for consolidating with JOURNAL_WATERMARK.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e9d017234f
commit e53a961c6b

15 changed files with 101 additions and 103 deletions
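The change is a mechanical rename with no behavioral change. For quick reference, the full mapping, reconstructed from the hunks below (the enum definition shown is the post-patch version):

    enum alloc_reserve         -> enum bch_watermark
    bch2_alloc_reserves[]      -> bch2_watermarks[]
    RESERVE_btree_movinggc     -> BCH_WATERMARK_btree_copygc
    RESERVE_btree              -> BCH_WATERMARK_btree
    RESERVE_movinggc           -> BCH_WATERMARK_copygc
    RESERVE_none               -> BCH_WATERMARK_normal
    RESERVE_stripe             -> BCH_WATERMARK_stripe
    RESERVE_NR                 -> BCH_WATERMARK_NR
    ec_stripe_head.reserve     -> ec_stripe_head.watermark
    bch_write_op.alloc_reserve -> bch_write_op.watermark

    #define BCH_WATERMARKS()		\
    	x(btree_copygc)			\
    	x(btree)			\
    	x(copygc)			\
    	x(normal)			\
    	x(stripe)

    enum bch_watermark {
    #define x(name)	BCH_WATERMARK_##name,
    	BCH_WATERMARKS()
    #undef x
    	BCH_WATERMARK_NR,
    };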
@@ -220,7 +220,7 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
 	u64 free = max_t(s64, 0,
 			 u.d[BCH_DATA_free].buckets
 			 + u.d[BCH_DATA_need_discard].buckets
-			 - bch2_dev_buckets_reserved(ca, RESERVE_stripe));
+			 - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));

 	return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
 }

@@ -44,9 +44,9 @@ static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
 	}
 }

-const char * const bch2_alloc_reserves[] = {
+const char * const bch2_watermarks[] = {
 #define x(t) #t,
-	BCH_ALLOC_RESERVES()
+	BCH_WATERMARKS()
 #undef x
 	NULL
 };

@@ -188,13 +188,13 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 	return -1;
 }

-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 {
-	switch (reserve) {
-	case RESERVE_btree:
-	case RESERVE_btree_movinggc:
+	switch (watermark) {
+	case BCH_WATERMARK_btree:
+	case BCH_WATERMARK_btree_copygc:
 		return 0;
-	case RESERVE_movinggc:
+	case BCH_WATERMARK_copygc:
 		return OPEN_BUCKETS_COUNT / 4;
 	default:
 		return OPEN_BUCKETS_COUNT / 2;

@@ -203,7 +203,7 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)

 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
 					      u64 bucket,
-					      enum alloc_reserve reserve,
+					      enum bch_watermark watermark,
 					      const struct bch_alloc_v4 *a,
 					      struct bucket_alloc_state *s,
 					      struct closure *cl)

@@ -233,7 +233,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *

 	spin_lock(&c->freelist_lock);

-	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
+	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
 		if (cl)
 			closure_wait(&c->open_buckets_wait, cl);

@@ -284,7 +284,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 }

 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
-					    enum alloc_reserve reserve, u64 free_entry,
+					    enum bch_watermark watermark, u64 free_entry,
 					    struct bucket_alloc_state *s,
 					    struct bkey_s_c freespace_k,
 					    struct closure *cl)

@@ -374,7 +374,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 		}
 	}

-	ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
+	ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
 	if (!ob)
 		iter.path->preserve = false;
 err:

@@ -394,7 +394,7 @@ err:
 static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
 			struct bch_dev *ca,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			struct bucket_alloc_state *s,
 			struct closure *cl)
 {

@@ -424,7 +424,7 @@ again:

 		s->buckets_seen++;

-		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
+		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
 		if (ob)
 			break;
 	}

@@ -445,7 +445,7 @@ again:

 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 						      struct bch_dev *ca,
-						      enum alloc_reserve reserve,
+						      enum bch_watermark watermark,
 						      struct bucket_alloc_state *s,
 						      struct closure *cl)
 {

@@ -474,7 +474,7 @@ again:

 		s->buckets_seen++;

-		ob = try_alloc_bucket(trans, ca, reserve,
+		ob = try_alloc_bucket(trans, ca, watermark,
 				      alloc_cursor, s, k, cl);
 		if (ob) {
 			iter.path->preserve = false;

@@ -507,7 +507,7 @@ again:
  */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 						   struct bch_dev *ca,
-						   enum alloc_reserve reserve,
+						   enum bch_watermark watermark,
 						   struct closure *cl,
 						   struct bch_dev_usage *usage)
 {

@@ -519,7 +519,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 	bool waiting = false;
again:
 	bch2_dev_usage_read_fast(ca, usage);
-	avail = dev_buckets_free(ca, *usage, reserve);
+	avail = dev_buckets_free(ca, *usage, watermark);

 	if (usage->d[BCH_DATA_need_discard].buckets > avail)
 		bch2_do_discards(c);

@@ -548,8 +548,8 @@ again:
 			closure_wake_up(&c->freelist_wait);
alloc:
 	ob = likely(freespace)
-		? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
-		: bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+		? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+		: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);

 	if (s.skipped_need_journal_commit * 2 > avail)
 		bch2_journal_flush_async(&c->journal, NULL);

@@ -564,7 +564,7 @@ err:

 	if (!IS_ERR(ob))
 		trace_and_count(c, bucket_alloc, ca,
-				bch2_alloc_reserves[reserve],
+				bch2_watermarks[watermark],
 				ob->bucket,
 				usage->d[BCH_DATA_free].buckets,
 				avail,

@@ -575,7 +575,7 @@ err:
 				"");
 	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
 		trace_and_count(c, bucket_alloc_fail, ca,
-				bch2_alloc_reserves[reserve],
+				bch2_watermarks[watermark],
 				0,
 				usage->d[BCH_DATA_free].buckets,
 				avail,

@@ -589,14 +589,14 @@ err:
 }

 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
-				      enum alloc_reserve reserve,
+				      enum bch_watermark watermark,
 				      struct closure *cl)
 {
 	struct bch_dev_usage usage;
 	struct open_bucket *ob;

 	bch2_trans_do(c, NULL, NULL, 0,
-		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
+		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, watermark,
								   cl, &usage)));
 	return ob;
 }

@@ -629,7 +629,7 @@ static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
 					       struct bch_dev_usage *usage)
 {
 	u64 *v = stripe->next_alloc + ca->dev_idx;
-	u64 free_space = dev_buckets_available(ca, RESERVE_none);
+	u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
 	u64 free_space_inv = free_space
 		? div64_u64(1ULL << 48, free_space)
 		: 1ULL << 48;

@@ -692,7 +692,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 			bool *have_cache,
 			unsigned flags,
 			enum bch_data_type data_type,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			struct closure *cl)
 {
 	struct bch_fs *c = trans->c;

@@ -725,7 +725,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 			continue;
 		}

-		ob = bch2_bucket_alloc_trans(trans, ca, reserve, cl, &usage);
+		ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
 		if (!IS_ERR(ob))
 			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
 		percpu_ref_put(&ca->ref);

@@ -766,7 +766,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 			 unsigned nr_replicas,
 			 unsigned *nr_effective,
 			 bool *have_cache,
-			 enum alloc_reserve reserve,
+			 enum bch_watermark watermark,
 			 unsigned flags,
 			 struct closure *cl)
 {

@@ -784,7 +784,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 	if (ec_open_bucket(c, ptrs))
 		return 0;

-	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl);
+	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
 	if (IS_ERR(h))
 		return PTR_ERR(h);
 	if (!h)

@@ -879,7 +879,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 				    unsigned nr_replicas,
 				    unsigned *nr_effective,
 				    bool *have_cache, bool ec,
-				    enum alloc_reserve reserve,
+				    enum bch_watermark watermark,
 				    unsigned flags)
 {
 	int i, ret = 0;

@@ -901,7 +901,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 			u64 avail;

 			bch2_dev_usage_read_fast(ca, &usage);
-			avail = dev_buckets_free(ca, usage, reserve);
+			avail = dev_buckets_free(ca, usage, watermark);
 			if (!avail)
 				continue;

@@ -931,7 +931,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 			unsigned nr_replicas,
 			unsigned *nr_effective,
 			bool *have_cache,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			unsigned flags,
 			struct closure *_cl)
 {

@@ -962,7 +962,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,

 	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
 				       nr_replicas, nr_effective,
-				       have_cache, erasure_code, reserve, flags);
+				       have_cache, erasure_code, watermark, flags);
 	if (ret)
 		return ret;

@@ -971,7 +971,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 					 target,
 					 nr_replicas, nr_effective,
 					 have_cache,
-					 reserve, flags, _cl);
+					 watermark, flags, _cl);
 	} else {
retry_blocking:
 		/*

@@ -980,7 +980,7 @@ retry_blocking:
 		 */
 		ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
 					nr_replicas, nr_effective, have_cache,
-					flags, wp->data_type, reserve, cl);
+					flags, wp->data_type, watermark, cl);
 		if (ret &&
 		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
 		    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&

@@ -1003,7 +1003,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 			unsigned nr_replicas,
 			unsigned *nr_effective,
 			bool *have_cache,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			unsigned flags,
 			struct closure *cl)
 {

@@ -1013,7 +1013,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 		ret = __open_bucket_add_buckets(trans, ptrs, wp,
 				devs_have, target, erasure_code,
 				nr_replicas, nr_effective, have_cache,
-				reserve, flags, cl);
+				watermark, flags, cl);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
 		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
 		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||

@@ -1026,7 +1026,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 	ret = __open_bucket_add_buckets(trans, ptrs, wp,
 			devs_have, target, false,
 			nr_replicas, nr_effective, have_cache,
-			reserve, flags, cl);
+			watermark, flags, cl);
 	return ret < 0 ? ret : 0;
 }

@@ -1263,7 +1263,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
 			     struct bch_devs_list *devs_have,
 			     unsigned nr_replicas,
 			     unsigned nr_replicas_required,
-			     enum alloc_reserve reserve,
+			     enum bch_watermark watermark,
 			     unsigned flags,
 			     struct closure *cl,
 			     struct write_point **wp_ret)

@@ -1296,7 +1296,7 @@ retry:
 		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 					      target, erasure_code,
 					      nr_replicas, &nr_effective,
-					      &have_cache, reserve,
+					      &have_cache, watermark,
 					      flags, NULL);
 		if (!ret ||
 		    bch2_err_matches(ret, BCH_ERR_transaction_restart))

@@ -1315,14 +1315,14 @@ retry:
 			ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 						      0, erasure_code,
 						      nr_replicas, &nr_effective,
-						      &have_cache, reserve,
+						      &have_cache, watermark,
 						      flags, cl);
 	} else {
allocate_blocking:
 		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 					      target, erasure_code,
 					      nr_replicas, &nr_effective,
-					      &have_cache, reserve,
+					      &have_cache, watermark,
 					      flags, cl);
 	}
alloc_done:

@@ -14,7 +14,7 @@ struct bch_dev;
 struct bch_fs;
 struct bch_devs_List;

-extern const char * const bch2_alloc_reserves[];
+extern const char * const bch2_watermarks[];

 void bch2_reset_alloc_cursors(struct bch_fs *);

@@ -31,7 +31,7 @@ void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
 long bch2_bucket_alloc_new_fs(struct bch_dev *);

 struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
-				      enum alloc_reserve, struct closure *);
+				      enum bch_watermark, struct closure *);

 static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
			   struct open_bucket *ob)

@@ -152,7 +152,7 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64
 int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
		      struct dev_stripe_state *, struct bch_devs_mask *,
		      unsigned, unsigned *, bool *, unsigned,
-		      enum bch_data_type, enum alloc_reserve,
+		      enum bch_data_type, enum bch_watermark,
		      struct closure *);

 int bch2_alloc_sectors_start_trans(struct btree_trans *,

@@ -160,7 +160,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *,
			     struct write_point_specifier,
			     struct bch_devs_list *,
			     unsigned, unsigned,
-			     enum alloc_reserve,
+			     enum bch_watermark,
			     unsigned,
			     struct closure *,
			     struct write_point **);

@@ -16,20 +16,18 @@ struct bucket_alloc_state {
 	u64			skipped_nouse;
 };

-struct ec_bucket_buf;
-
-#define BCH_ALLOC_RESERVES()		\
-	x(btree_movinggc)		\
+#define BCH_WATERMARKS()		\
+	x(btree_copygc)			\
 	x(btree)			\
-	x(movinggc)			\
-	x(none)				\
+	x(copygc)			\
+	x(normal)			\
 	x(stripe)

-enum alloc_reserve {
-#define x(name)	RESERVE_##name,
-	BCH_ALLOC_RESERVES()
+enum bch_watermark {
+#define x(name)	BCH_WATERMARK_##name,
+	BCH_WATERMARKS()
 #undef x
-	RESERVE_NR,
+	BCH_WATERMARK_NR,
 };

 #define OPEN_BUCKETS_COUNT	1024

@@ -247,15 +247,15 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 	struct open_buckets ob = { .nr = 0 };
 	struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
 	unsigned nr_reserve;
-	enum alloc_reserve alloc_reserve;
+	enum bch_watermark alloc_reserve;
 	int ret;

 	if (flags & BTREE_INSERT_USE_RESERVE) {
 		nr_reserve	= 0;
-		alloc_reserve	= RESERVE_btree_movinggc;
+		alloc_reserve	= BCH_WATERMARK_btree_copygc;
 	} else {
 		nr_reserve	= BTREE_NODE_RESERVE;
-		alloc_reserve	= RESERVE_btree;
+		alloc_reserve	= BCH_WATERMARK_btree;
 	}

 	mutex_lock(&c->btree_reserve_cache_lock);

@@ -150,26 +150,26 @@ static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)

 void bch2_dev_usage_init(struct bch_dev *);

-static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum alloc_reserve reserve)
+static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
 {
 	s64 reserved = 0;

-	switch (reserve) {
-	case RESERVE_NR:
+	switch (watermark) {
+	case BCH_WATERMARK_NR:
 		unreachable();
-	case RESERVE_stripe:
+	case BCH_WATERMARK_stripe:
 		reserved += ca->mi.nbuckets >> 6;
 		fallthrough;
-	case RESERVE_none:
+	case BCH_WATERMARK_normal:
 		reserved += ca->mi.nbuckets >> 6;
 		fallthrough;
-	case RESERVE_movinggc:
+	case BCH_WATERMARK_copygc:
 		reserved += ca->nr_btree_reserve;
 		fallthrough;
-	case RESERVE_btree:
+	case BCH_WATERMARK_btree:
 		reserved += ca->nr_btree_reserve;
 		fallthrough;
-	case RESERVE_btree_movinggc:
+	case BCH_WATERMARK_btree_copygc:
 		break;
 	}

@@ -178,17 +178,17 @@ static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum alloc_reser

 static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
-				   enum alloc_reserve reserve)
+				   enum bch_watermark watermark)
 {
 	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
-		     bch2_dev_buckets_reserved(ca, reserve));
+		     bch2_dev_buckets_reserved(ca, watermark));
 }

 static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
-					  enum alloc_reserve reserve)
+					  enum bch_watermark watermark)
 {
 	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets

@@ -196,13 +196,13 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
		       + usage.d[BCH_DATA_need_gc_gens].buckets
		       + usage.d[BCH_DATA_need_discard].buckets
		       - ca->nr_open_buckets
-		       - bch2_dev_buckets_reserved(ca, reserve));
+		       - bch2_dev_buckets_reserved(ca, watermark));
 }

 static inline u64 dev_buckets_available(struct bch_dev *ca,
-					enum alloc_reserve reserve)
+					enum bch_watermark watermark)
 {
-	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), reserve);
+	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
 }

 /* Filesystem usage: */

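The bch2_dev_buckets_reserved() switch above sizes each watermark by falling through from the strictest case to the loosest, so each watermark's reserve includes everything reserved by the watermarks below it. A minimal standalone sketch of that accumulation (the device sizes passed in are made-up values for illustration, not bcachefs defaults):

    #include <stdio.h>
    #include <stdint.h>

    enum bch_watermark {
    	BCH_WATERMARK_btree_copygc,
    	BCH_WATERMARK_btree,
    	BCH_WATERMARK_copygc,
    	BCH_WATERMARK_normal,
    	BCH_WATERMARK_stripe,
    };

    /* Mirrors the fallthrough chain in bch2_dev_buckets_reserved() above. */
    static uint64_t reserved_for(enum bch_watermark w,
    			     uint64_t nbuckets, uint64_t nr_btree_reserve)
    {
    	uint64_t reserved = 0;

    	switch (w) {
    	case BCH_WATERMARK_stripe:
    		reserved += nbuckets >> 6;
    		/* fallthrough */
    	case BCH_WATERMARK_normal:
    		reserved += nbuckets >> 6;
    		/* fallthrough */
    	case BCH_WATERMARK_copygc:
    		reserved += nr_btree_reserve;
    		/* fallthrough */
    	case BCH_WATERMARK_btree:
    		reserved += nr_btree_reserve;
    		/* fallthrough */
    	case BCH_WATERMARK_btree_copygc:
    		break;
    	}
    	return reserved;
    }

    int main(void)
    {
    	/* made-up device: 1M buckets, 512-bucket btree reserve */
    	for (int w = BCH_WATERMARK_btree_copygc; w <= BCH_WATERMARK_stripe; w++)
    		printf("watermark %d reserves %llu buckets\n", w,
    		       (unsigned long long)reserved_for(w, 1ULL << 20, 512));
    	return 0;
    }

Stricter watermarks reserve fewer buckets, so btree and copygc allocations may dig deeper into the remaining free space than normal user writes.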
@@ -381,7 +381,7 @@ void bch2_update_unwritten_extent(struct btree_trans *trans,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
-				update->op.alloc_reserve,
+				update->op.watermark,
				0, &cl, &wp);
 		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
 			bch2_trans_unlock(trans);

@@ -459,7 +459,7 @@ int bch2_data_update_init(struct btree_trans *trans,
			bch2_compression_opt_to_type[io_opts.background_compression ?:
						     io_opts.compression];
 	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
-		m->op.alloc_reserve = RESERVE_movinggc;
+		m->op.watermark = BCH_WATERMARK_copygc;

 	bkey_for_each_ptr(ptrs, ptr)
 		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);

@@ -1333,7 +1333,7 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 static struct ec_stripe_head *
 ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy,
-			 enum alloc_reserve reserve)
+			 enum bch_watermark watermark)
 {
 	struct ec_stripe_head *h;
 	struct bch_dev *ca;

@@ -1349,7 +1349,7 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 	h->target	= target;
 	h->algo		= algo;
 	h->redundancy	= redundancy;
-	h->reserve	= reserve;
+	h->watermark	= watermark;

 	rcu_read_lock();
 	h->devs = target_rw_devs(c, BCH_DATA_user, target);

@@ -1384,7 +1384,7 @@ struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
						 unsigned target,
						 unsigned algo,
						 unsigned redundancy,
-						 enum alloc_reserve reserve)
+						 enum bch_watermark watermark)
 {
 	struct bch_fs *c = trans->c;
 	struct ec_stripe_head *h;

@@ -1406,21 +1406,21 @@ struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
 		if (h->target		== target &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
-		    h->reserve		== reserve) {
+		    h->watermark	== watermark) {
 			ret = bch2_trans_mutex_lock(trans, &h->lock);
 			if (ret)
 				h = ERR_PTR(ret);
 			goto found;
 		}

-	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, reserve);
+	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
found:
 	mutex_unlock(&c->ec_stripe_head_lock);
 	return h;
 }

 static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
-				    enum alloc_reserve reserve, struct closure *cl)
+				    enum bch_watermark watermark, struct closure *cl)
 {
 	struct bch_fs *c = trans->c;
 	struct bch_devs_mask devs = h->devs;

@@ -1453,7 +1453,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
					    &nr_have_parity,
					    &have_cache, 0,
					    BCH_DATA_parity,
-					    reserve,
+					    watermark,
					    cl);

 		open_bucket_for_each(c, &buckets, ob, i) {

@@ -1480,7 +1480,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
					    &nr_have_data,
					    &have_cache, 0,
					    BCH_DATA_user,
-					    reserve,
+					    watermark,
					    cl);

 		open_bucket_for_each(c, &buckets, ob, i) {

@@ -1658,7 +1658,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
-					       enum alloc_reserve reserve,
+					       enum bch_watermark watermark,
					       struct closure *cl)
 {
 	struct bch_fs *c = trans->c;

@@ -1666,7 +1666,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 	bool waiting = false;
 	int ret;

-	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, reserve);
+	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
 	if (!h)
 		bch_err(c, "no stripe head");
 	if (IS_ERR_OR_NULL(h))

@@ -1687,7 +1687,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 		goto alloc_existing;

 	/* First, try to allocate a full stripe: */
-	ret = new_stripe_alloc_buckets(trans, h, RESERVE_stripe, NULL) ?:
+	ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
		__bch2_ec_stripe_head_reserve(trans, h);
 	if (!ret)
 		goto allocate_buf;

@@ -1706,8 +1706,8 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 	if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
 		goto err;

-	if (reserve == RESERVE_movinggc) {
-		ret = new_stripe_alloc_buckets(trans, h, reserve, NULL) ?:
+	if (watermark == BCH_WATERMARK_copygc) {
+		ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
			__bch2_ec_stripe_head_reserve(trans, h);
 		if (ret)
 			goto err;

@@ -1723,10 +1723,10 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 		closure_wake_up(&c->freelist_wait);
alloc_existing:
 	/*
-	 * Retry allocating buckets, with the reserve watermark for this
+	 * Retry allocating buckets, with the watermark for this
	 * particular write:
	 */
-	ret = new_stripe_alloc_buckets(trans, h, reserve, cl);
+	ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
 	if (ret)
 		goto err;

@@ -1880,7 +1880,7 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
 	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
 		prt_printf(out, "target %u algo %u redundancy %u %s:\n",
		       h->target, h->algo, h->redundancy,
-		       bch2_alloc_reserves[h->reserve]);
+		       bch2_watermarks[h->watermark]);

 		if (h->s)
 			prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",

@@ -1898,7 +1898,7 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
			   s->idx, s->nr_data, s->nr_parity,
			   atomic_read(&s->ref[STRIPE_REF_io]),
			   atomic_read(&s->ref[STRIPE_REF_stripe]),
-			   bch2_alloc_reserves[s->h->reserve]);
+			   bch2_watermarks[s->h->watermark]);
 	}
 	mutex_unlock(&c->ec_stripe_new_lock);
 }

@@ -187,7 +187,7 @@ struct ec_stripe_head {
 	unsigned		target;
 	unsigned		algo;
 	unsigned		redundancy;
-	enum alloc_reserve	reserve;
+	enum bch_watermark	watermark;

 	struct bch_devs_mask	devs;
 	unsigned		nr_active_devs;

@@ -211,7 +211,7 @@ int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);
 void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
 struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
			unsigned, unsigned, unsigned,
-			enum alloc_reserve, struct closure *);
+			enum bch_watermark, struct closure *);

 void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
 void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);

@@ -451,7 +451,7 @@ retry:
				       &devs_have,
				       opts.data_replicas,
				       opts.data_replicas,
-				       RESERVE_none, 0, &cl, &wp);
+				       BCH_WATERMARK_normal, 0, &cl, &wp);
 		if (ret) {
 			bch2_trans_unlock(trans);
 			closure_sync(&cl);

@@ -1696,7 +1696,7 @@ again:
				&op->devs_have,
				op->nr_replicas,
				op->nr_replicas_required,
-				op->alloc_reserve,
+				op->watermark,
				op->flags,
				(op->flags & (BCH_WRITE_ALLOC_NOWAIT|
					      BCH_WRITE_ONLY_SPECIFIED_DEVS))

@@ -59,7 +59,7 @@ enum bch_write_flags {

 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
 {
-	return op->alloc_reserve == RESERVE_movinggc
+	return op->watermark == BCH_WATERMARK_copygc
		? op->c->copygc_wq
		: op->c->btree_update_wq;
 }

@@ -89,7 +89,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
 	op->compression_type	= bch2_compression_opt_to_type[opts.compression];
 	op->nr_replicas		= 0;
 	op->nr_replicas_required = c->opts.data_replicas_required;
-	op->alloc_reserve	= RESERVE_none;
+	op->watermark		= BCH_WATERMARK_normal;
 	op->incompressible	= 0;
 	op->open_buckets.nr	= 0;
 	op->devs_have.nr	= 0;

@@ -119,7 +119,7 @@ struct bch_write_op {
 	unsigned		compression_type:4;
 	unsigned		nr_replicas:4;
 	unsigned		nr_replicas_required:4;
-	unsigned		alloc_reserve:3;
+	unsigned		watermark:3;
 	unsigned		incompressible:1;
 	unsigned		stripe_waited:1;

@@ -828,7 +828,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
				break;
			}
 		} else {
-			ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none, cl);
+			ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
 			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
 			if (ret)
 				break;

@@ -271,7 +271,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 	for_each_rw_member(ca, c, dev_idx) {
 		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

-		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
+		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
				       ca->mi.bucket_size) >> 1);
		fragmented = 0;

@@ -850,8 +850,8 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)

 	prt_printf(out, "reserves:");
 	prt_newline(out);
-	for (i = 0; i < RESERVE_NR; i++) {
-		prt_str(out, bch2_alloc_reserves[i]);
+	for (i = 0; i < BCH_WATERMARK_NR; i++) {
+		prt_str(out, bch2_watermarks[i]);
 		prt_tab(out);
 		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
 		prt_tab_rjust(out);