bcachefs: move some checks to expensive_debug_checks
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 76a0537bf1
commit f13f5a8c83
4 changed files with 21 additions and 8 deletions
@@ -257,6 +257,8 @@ do { \
 	BCH_DEBUG_PARAM(expensive_debug_checks,				\
 		"Enables various runtime debugging checks that "	\
 		"significantly affect performance")			\
+	BCH_DEBUG_PARAM(debug_check_iterators,				\
+		"Enables extra verification for btree iterators")	\
 	BCH_DEBUG_PARAM(debug_check_bkeys,				\
 		"Run bkey_debugcheck (primarily checking GC/allocation "\
 		"information) when iterating over keys")		\
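The new debug_check_iterators entry matters because of how the BCH_DEBUG_PARAM() X-macro list is expanded elsewhere in bcachefs.h. Below is a minimal, hypothetical sketch of that pattern, not the exact bcachefs definitions (struct bch_fs here is a stub, and the bch2_##name globals and helper bodies are assumptions), showing how each list entry yields a per-filesystem flag, a global knob, and a predicate such as debug_check_iterators(c) that the later hunks call.

/*
 * Sketch only: simplified stand-ins for the real bcachefs X-macro expansion.
 */
#include <stdbool.h>

#define BCH_DEBUG_PARAMS()						\
	BCH_DEBUG_PARAM(expensive_debug_checks,				\
		"Enables various runtime debugging checks that "	\
		"significantly affect performance")			\
	BCH_DEBUG_PARAM(debug_check_iterators,				\
		"Enables extra verification for btree iterators")

struct bch_fs {
	/* one runtime flag per debug param */
#define BCH_DEBUG_PARAM(name, description) bool name;
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
};

/* global knobs (module parameters in the real code) */
#define BCH_DEBUG_PARAM(name, description) static bool bch2_##name;
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

/* predicates: expensive_debug_checks(c), debug_check_iterators(c), ... */
#define BCH_DEBUG_PARAM(name, description)				\
	static inline bool name(struct bch_fs *c)			\
	{								\
		return bch2_##name || c->name;				\
	}
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

With an expansion along these lines, adding one line to the list is all it takes for the guards added in the btree_iter.c and extents.c hunks below to work.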
@@ -1023,7 +1023,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 		k = p;
 	}
 
-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+	if (btree_keys_expensive_checks(b)) {
 		BUG_ON(ret >= orig_k);
 
 		for (i = ret ? bkey_next(ret) : btree_bkey_first(b, t);
@@ -1644,10 +1644,11 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
 void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
 				  struct btree *b)
 {
-#ifdef CONFIG_BCACHEFS_DEBUG
-	bch2_btree_node_iter_verify(iter, b);
-	bch2_btree_node_iter_next_check(iter, b);
-#endif
+	if (btree_keys_expensive_checks(b)) {
+		bch2_btree_node_iter_verify(iter, b);
+		bch2_btree_node_iter_next_check(iter, b);
+	}
+
 	__bch2_btree_node_iter_advance(iter, b);
 }
 
@@ -1710,7 +1711,7 @@ found:
 	iter->data[0].k = __btree_node_key_to_offset(b, prev);
 	iter->data[0].end = end;
 out:
-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+	if (btree_keys_expensive_checks(b)) {
 		struct btree_node_iter iter2 = *iter;
 
 		if (prev)
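The hunks above (in the bset code) trade compile-time gates, #ifdef CONFIG_BCACHEFS_DEBUG and IS_ENABLED(CONFIG_BCACHEFS_DEBUG), for the runtime btree_keys_expensive_checks() predicate. A minimal sketch of what such a predicate can look like follows; the struct layout and the field it reads are assumptions for illustration, not the exact bcachefs definition. The property that matters is that the check is runtime-switchable in debug builds and still constant-folds to false when CONFIG_BCACHEFS_DEBUG is off, so the verification branches disappear from non-debug builds.

/* Sketch only: simplified stand-in for the real btree/bset types. */
#include <stdbool.h>

struct btree {
	/* assumed: points at the owning filesystem's expensive_debug_checks flag */
	bool *expensive_debug_checks;
	/* ... rest of the btree node elided ... */
};

static inline bool btree_keys_expensive_checks(const struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	return *b->expensive_debug_checks;	/* runtime knob in debug builds */
#else
	return false;				/* branch compiled out otherwise */
#endif
}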
@@ -429,6 +429,9 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
 	struct btree_node_iter tmp = l->iter;
 	struct bkey_packed *k;
 
+	if (!debug_check_iterators(iter->trans->c))
+		return;
+
 	if (iter->uptodate > BTREE_ITER_NEED_PEEK)
 		return;
 
@@ -475,6 +478,9 @@ void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
 {
 	struct btree_iter *linked;
 
+	if (!debug_check_iterators(iter->trans->c))
+		return;
+
 	trans_for_each_iter_with_node(iter->trans, b, linked)
 		__bch2_btree_iter_verify(linked, b);
 }
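In the btree iterator hunks above, the debug_check_iterators() guard is placed inside the verify functions themselves rather than at their call sites. A self-contained illustration of that pattern is below; every example_* name is a hypothetical stand-in, the point is only that callers can invoke the verifier unconditionally and it becomes a cheap early return when the knob is off.

/* Sketch only: hypothetical types demonstrating the early-return guard. */
#include <assert.h>
#include <stdbool.h>

struct example_fs {
	bool debug_check_iterators;	/* runtime debug knob */
};

struct example_iter {
	struct example_fs *c;
	int pos;
	int end;
};

static void example_iter_verify(const struct example_iter *iter)
{
	if (!iter->c->debug_check_iterators)
		return;				/* cheap no-op when disabled */

	assert(iter->pos <= iter->end);		/* stand-in for the real checks */
}

static void example_iter_advance(struct example_iter *iter)
{
	example_iter_verify(iter);		/* no #ifdef needed at the call site */
	iter->pos++;
}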
@@ -788,7 +788,8 @@ static bool bch2_extent_merge_inline(struct bch_fs *,
 				     struct bkey_packed *,
 				     bool);
 
-static void verify_extent_nonoverlapping(struct btree *b,
+static void verify_extent_nonoverlapping(struct bch_fs *c,
+					 struct btree *b,
 					 struct btree_node_iter *_iter,
 					 struct bkey_i *insert)
 {
@@ -797,6 +798,9 @@ static void verify_extent_nonoverlapping(struct btree *b,
 	struct bkey_packed *k;
 	struct bkey uk;
 
+	if (!expensive_debug_checks(c))
+		return;
+
 	iter = *_iter;
 	k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_discard);
 	BUG_ON(k &&
@@ -847,7 +851,7 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
 	BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));
 
 	EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
-	verify_extent_nonoverlapping(l->b, &l->iter, insert);
+	verify_extent_nonoverlapping(c, l->b, &l->iter, insert);
 
 	node_iter = l->iter;
 	k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_discard);
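The extents hunks above thread struct bch_fs *c into verify_extent_nonoverlapping() purely so the verifier can consult expensive_debug_checks(c) and bail out when the flag is off. A small sketch of that calling-convention change in isolation is below; all sketch_* names and types are hypothetical stand-ins, not the bcachefs structures.

/* Sketch only: a verifier gaining an fs pointer so it can check a runtime flag. */
#include <assert.h>
#include <stdbool.h>

struct sketch_fs {
	bool expensive_debug_checks;
};

struct sketch_btree {
	int nr_keys;
};

static void sketch_verify_nonoverlapping(struct sketch_fs *c,
					 struct sketch_btree *b,
					 int insert_start, int insert_end)
{
	if (!c->expensive_debug_checks)
		return;				/* skip unless the knob is on */

	/* stand-in for walking neighbouring keys and checking for overlap */
	assert(insert_start < insert_end);
	(void) b;
}

static void sketch_bset_insert(struct sketch_fs *c, struct sketch_btree *b,
			       int start, int end)
{
	/* caller passes its existing fs pointer along, as in the real hunk */
	sketch_verify_nonoverlapping(c, b, start, end);
	b->nr_keys++;
}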