bcachefs: Don't pass around may_drop_locks
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b7607ce98f
commit b03b81dfd2

7 changed files with 38 additions and 49 deletions
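In short, the commit stops threading a may_drop_locks bool through bch2_btree_node_get(), btree_node_lock() and bch2_btree_iter_upgrade(), and instead derives the same policy from state the iterator already carries (the BTREE_ITER_NOUNLOCK flag). A minimal, self-contained sketch of that refactoring pattern follows; the names and types are toy stand-ins for illustration, not the actual bcachefs code.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: illustrative only, not the bcachefs types. */
#define TOY_ITER_NOUNLOCK	(1U << 0)

struct toy_iter {
	unsigned flags;
};

/* Before: every caller had to pass may_drop_locks explicitly. */
static bool toy_lock_old(struct toy_iter *iter, bool may_drop_locks)
{
	(void)iter;
	return may_drop_locks;
}

/* After: the decision is read off the iterator itself. */
static bool toy_lock_new(struct toy_iter *iter)
{
	return !(iter->flags & TOY_ITER_NOUNLOCK);
}

int main(void)
{
	struct toy_iter it = { .flags = 0 };

	printf("may drop locks? old=%d new=%d\n",
	       toy_lock_old(&it, true), toy_lock_new(&it));

	/* The caller marks the iterator once instead of passing a bool everywhere. */
	it.flags |= TOY_ITER_NOUNLOCK;
	printf("may drop locks? old=%d new=%d\n",
	       toy_lock_old(&it, false), toy_lock_new(&it));
	return 0;
}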
@@ -654,8 +654,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
  */
 struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
 				  const struct bkey_i *k, unsigned level,
-				  enum six_lock_type lock_type,
-				  bool may_drop_locks)
+				  enum six_lock_type lock_type)
 {
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
@@ -722,8 +721,7 @@ retry:
 		if (btree_node_read_locked(iter, level + 1))
 			btree_node_unlock(iter, level + 1);
 
-		if (!btree_node_lock(b, k->k.p, level, iter,
-				     lock_type, may_drop_locks))
+		if (!btree_node_lock(b, k->k.p, level, iter, lock_type))
 			return ERR_PTR(-EINTR);
 
 		if (unlikely(PTR_HASH(&b->key) != PTR_HASH(k) ||
@@ -772,9 +770,9 @@ retry:
 struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 					  struct btree_iter *iter,
 					  struct btree *b,
-					  bool may_drop_locks,
 					  enum btree_node_sibling sib)
 {
+	struct btree_trans *trans = iter->trans;
 	struct btree *parent;
 	struct btree_node_iter node_iter;
 	struct bkey_packed *k;
@@ -786,8 +784,10 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 	if (!parent)
 		return NULL;
 
-	if (!bch2_btree_node_relock(iter, level + 1))
-		goto out_upgrade;
+	if (!bch2_btree_node_relock(iter, level + 1)) {
+		ret = ERR_PTR(-EINTR);
+		goto out;
+	}
 
 	node_iter = iter->l[parent->c.level].iter;
 
@@ -804,19 +804,20 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 	bch2_bkey_unpack(parent, &tmp.k, k);
 
 	ret = bch2_btree_node_get(c, iter, &tmp.k, level,
-				  SIX_LOCK_intent, may_drop_locks);
+				  SIX_LOCK_intent);
 
-	if (PTR_ERR_OR_ZERO(ret) == -EINTR && may_drop_locks) {
+	if (PTR_ERR_OR_ZERO(ret) == -EINTR &&
+	    !(iter->flags & BTREE_ITER_NOUNLOCK)) {
 		struct btree_iter *linked;
 
 		if (!bch2_btree_node_relock(iter, level + 1))
-			goto out_upgrade;
+			goto out;
 
 		/*
 		 * We might have got -EINTR because trylock failed, and we're
 		 * holding other locks that would cause us to deadlock:
 		 */
-		trans_for_each_iter(iter->trans, linked)
+		trans_for_each_iter(trans, linked)
 			if (btree_iter_cmp(iter, linked) < 0)
 				__bch2_btree_iter_unlock(linked);
 
@@ -824,7 +825,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 		btree_node_unlock(iter, level);
 
 		ret = bch2_btree_node_get(c, iter, &tmp.k, level,
-					  SIX_LOCK_intent, may_drop_locks);
+					  SIX_LOCK_intent);
 
 		/*
 		 * before btree_iter_relock() calls btree_iter_verify_locks():
@@ -841,17 +842,16 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 			}
 		}
 
-		bch2_btree_trans_relock(iter->trans);
+		bch2_btree_trans_relock(trans);
 	}
 out:
 	if (btree_lock_want(iter, level + 1) == BTREE_NODE_UNLOCKED)
 		btree_node_unlock(iter, level + 1);
 
-	bch2_btree_trans_verify_locks(iter->trans);
+	if (PTR_ERR_OR_ZERO(ret) == -EINTR)
+		bch2_btree_iter_upgrade(iter, level + 2);
 
-	BUG_ON((!may_drop_locks || !IS_ERR(ret)) &&
-	       (iter->uptodate >= BTREE_ITER_NEED_RELOCK ||
-		!btree_node_locked(iter, level)));
+	BUG_ON(!IS_ERR(ret) && !btree_node_locked(iter, level));
 
 	if (!IS_ERR_OR_NULL(ret)) {
 		struct btree *n1 = ret, *n2 = b;
@@ -864,12 +864,9 @@ out:
 			       n2->data->min_key));
 	}
 
+	bch2_btree_trans_verify_locks(trans);
+
 	return ret;
-out_upgrade:
-	if (may_drop_locks)
-		bch2_btree_iter_upgrade(iter, level + 2, true);
-	ret = ERR_PTR(-EINTR);
-	goto out;
 }
 
 void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,

@@ -23,11 +23,10 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *);
 
 struct btree *bch2_btree_node_get(struct bch_fs *, struct btree_iter *,
 				  const struct bkey_i *, unsigned,
-				  enum six_lock_type, bool);
+				  enum six_lock_type);
 
 struct btree *bch2_btree_node_get_sibling(struct bch_fs *, struct btree_iter *,
-					  struct btree *, bool,
-					  enum btree_node_sibling);
+					  struct btree *, enum btree_node_sibling);
 
 void bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
 			      const struct bkey_i *, unsigned);

@@ -196,8 +196,7 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter,
 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 			    unsigned level,
 			    struct btree_iter *iter,
-			    enum six_lock_type type,
-			    bool may_drop_locks)
+			    enum six_lock_type type)
 {
 	struct btree_iter *linked;
 	bool ret = true;
@@ -225,7 +224,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 		 */
 		if (type == SIX_LOCK_intent &&
 		    linked->nodes_locked != linked->nodes_intent_locked) {
-			if (may_drop_locks) {
+			if (!(iter->flags & BTREE_ITER_NOUNLOCK)) {
 				linked->locks_want = max_t(unsigned,
 							   linked->locks_want,
 							   __fls(linked->nodes_locked) + 1);
@@ -241,7 +240,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 		 */
 		if (linked->btree_id == iter->btree_id &&
 		    level > __fls(linked->nodes_locked)) {
-			if (may_drop_locks) {
+			if (!(iter->flags & BTREE_ITER_NOUNLOCK)) {
 				linked->locks_want =
 					max(level + 1, max_t(unsigned,
 							     linked->locks_want,
@@ -858,7 +857,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
 
 		lock_type = __btree_lock_want(iter, iter->level);
 		if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
-					      iter, lock_type, true)))
+					      iter, lock_type)))
 			return -EINTR;
 
 		if (likely(b == c->btree_roots[iter->btree_id].b &&
@@ -922,7 +921,7 @@ static inline int btree_iter_down(struct btree_iter *iter)
 	bch2_bkey_unpack(l->b, &tmp.k,
 			 bch2_btree_node_iter_peek(&l->iter, l->b));
 
-	b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type, true);
+	b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type);
 	if (unlikely(IS_ERR(b)))
 		return PTR_ERR(b);
 

@@ -112,13 +112,12 @@ bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
 bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *, unsigned);
 
 static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
-					   unsigned new_locks_want,
-					   bool may_drop_locks)
+					   unsigned new_locks_want)
 {
 	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
 
 	return iter->locks_want < new_locks_want
-		? (may_drop_locks
+		? (!(iter->flags & BTREE_ITER_NOUNLOCK)
 			? __bch2_btree_iter_upgrade(iter, new_locks_want)
 			: __bch2_btree_iter_upgrade_nounlock(iter, new_locks_want))
 		: iter->uptodate <= BTREE_ITER_NEED_PEEK;
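Spelled out, the new inline wrapper upgrades by dropping and retaking locks unless the iterator carries BTREE_ITER_NOUNLOCK, in which case it uses the nounlock variant. A simplified restatement of that ternary, assuming the declarations from the surrounding bcachefs headers, shown only for clarity and not as a replacement for the hunk above:

static inline bool bch2_btree_iter_upgrade_sketch(struct btree_iter *iter,
						  unsigned new_locks_want)
{
	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	/* Already want enough locks: just report whether the iterator is usable. */
	if (iter->locks_want >= new_locks_want)
		return iter->uptodate <= BTREE_ITER_NEED_PEEK;

	/* Caller forbids dropping locks: take the nounlock upgrade path. */
	if (iter->flags & BTREE_ITER_NOUNLOCK)
		return __bch2_btree_iter_upgrade_nounlock(iter, new_locks_want);

	/* Otherwise the upgrade is allowed to drop and retake locks. */
	return __bch2_btree_iter_upgrade(iter, new_locks_want);
}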
@@ -175,20 +175,18 @@ static inline bool btree_node_lock_increment(struct btree_iter *iter,
 }
 
 bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
-			    struct btree_iter *, enum six_lock_type, bool);
+			    struct btree_iter *, enum six_lock_type);
 
 static inline bool btree_node_lock(struct btree *b, struct bpos pos,
 				   unsigned level,
 				   struct btree_iter *iter,
-				   enum six_lock_type type,
-				   bool may_drop_locks)
+				   enum six_lock_type type)
 {
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
 
 	return likely(six_trylock_type(&b->c.lock, type)) ||
 		btree_node_lock_increment(iter, b, level, type) ||
-		__bch2_btree_node_lock(b, pos, level, iter,
-				       type, may_drop_locks);
+		__bch2_btree_node_lock(b, pos, level, iter, type);
 }
 
 bool __bch2_btree_node_relock(struct btree_iter *, unsigned);
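For readers skimming the header change above: btree_node_lock() still tries the cheap paths first and only then falls back to the slow path, which after this patch reads the no-unlock policy from iter->flags rather than from a parameter. An unrolled sketch of the same short-circuit chain, assuming the same declarations as the header and written for clarity only:

static inline bool btree_node_lock_sketch(struct btree *b, struct bpos pos,
					  unsigned level,
					  struct btree_iter *iter,
					  enum six_lock_type type)
{
	EBUG_ON(level >= BTREE_MAX_DEPTH);

	/* 1) Uncontended case: a plain trylock on the node's SIX lock. */
	if (likely(six_trylock_type(&b->c.lock, type)))
		return true;

	/* 2) A linked iterator already holds this node's lock: bump the count. */
	if (btree_node_lock_increment(iter, b, level, type))
		return true;

	/* 3) Slow path, which may drop other locks unless BTREE_ITER_NOUNLOCK is set. */
	return __bch2_btree_node_lock(b, pos, level, iter, type);
}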
@@ -1585,8 +1585,7 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
 	 * XXX: figure out how far we might need to split,
 	 * instead of locking/reserving all the way to the root:
 	 */
-	if (!bch2_btree_iter_upgrade(iter, U8_MAX,
-				     !(flags & BTREE_INSERT_NOUNLOCK))) {
+	if (!bch2_btree_iter_upgrade(iter, U8_MAX)) {
 		ret = -EINTR;
 		goto out;
 	}
@@ -1649,8 +1648,7 @@ retry:
 		goto out;
 
 	/* XXX: can't be holding read locks */
-	m = bch2_btree_node_get_sibling(c, iter, b,
-					!(flags & BTREE_INSERT_NOUNLOCK), sib);
+	m = bch2_btree_node_get_sibling(c, iter, b, sib);
 	if (IS_ERR(m)) {
 		ret = PTR_ERR(m);
 		goto err;
@@ -1697,8 +1695,7 @@ retry:
 	    !down_read_trylock(&c->gc_lock))
 		goto err_cycle_gc_lock;
 
-	if (!bch2_btree_iter_upgrade(iter, U8_MAX,
-				     !(flags & BTREE_INSERT_NOUNLOCK))) {
+	if (!bch2_btree_iter_upgrade(iter, U8_MAX)) {
 		ret = -EINTR;
 		goto err_unlock;
 	}
@@ -1760,7 +1757,7 @@ retry:
 	if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
 		up_read(&c->gc_lock);
 out:
-	bch2_btree_trans_verify_locks(iter->trans);
+	bch2_btree_trans_verify_locks(trans);
 
 	/*
 	 * Don't downgrade locks here: we're called after successful insert,
@@ -1872,7 +1869,7 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
 
 	closure_init_stack(&cl);
 
-	bch2_btree_iter_upgrade(iter, U8_MAX, true);
+	bch2_btree_iter_upgrade(iter, U8_MAX);
 
 	if (!(flags & BTREE_INSERT_GC_LOCK_HELD)) {
 		if (!down_read_trylock(&c->gc_lock)) {
@@ -2044,7 +2041,7 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
 
 	closure_init_stack(&cl);
 
-	if (!bch2_btree_iter_upgrade(iter, U8_MAX, true))
+	if (!bch2_btree_iter_upgrade(iter, U8_MAX))
 		return -EINTR;
 
 	if (!down_read_trylock(&c->gc_lock)) {

@@ -816,7 +816,7 @@ static int __bch2_trans_commit(struct btree_trans *trans,
 			unsigned old_locks_want = i->iter->locks_want;
 			unsigned old_uptodate = i->iter->uptodate;
 
-			if (!bch2_btree_iter_upgrade(i->iter, 1, true)) {
+			if (!bch2_btree_iter_upgrade(i->iter, 1)) {
 				trans_restart(" (failed upgrade, locks_want %u uptodate %u)",
 					      old_locks_want, old_uptodate);
 				ret = -EINTR;