Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
bcachefs: Rework iter->pos handling
- Rework some of the helper comparison functions for consistency

- Currently trying to refactor all the logic that's different for
  extents in the btree iterator code. The main difference is that for
  non-extents we search for a key greater than or equal to the search
  key, while for extents we search for a key strictly greater than the
  search key (iter->pos).

So that logic is now handled by btree_iter_search_key(), which computes
the real search key based on iter->pos and whether or not we're
searching for a key >= or > iter->pos.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
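The whole patch hinges on one equivalence: the first key strictly greater than pos is also the first key greater than or equal to the successor of pos, so the extents and non-extents cases reduce to a single >= search once the search key is adjusted. The standalone sketch below demonstrates this with simplified stand-ins for struct bpos, bkey_successor() and the search loop; none of it is the kernel's actual code.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct bpos; the real one has more fields. */
struct bpos { uint64_t inode, offset; };

static int bpos_cmp(struct bpos l, struct bpos r)
{
	if (l.inode != r.inode)
		return l.inode < r.inode ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	return 0;
}

/* Smallest position strictly greater than @pos (offset carries into inode). */
static struct bpos bpos_successor(struct bpos pos)
{
	if (++pos.offset == 0)
		pos.inode++;
	return pos;
}

/*
 * The point of btree_iter_search_key(): "first key > pos" (extents)
 * equals "first key >= successor(pos)", so one >= comparison serves
 * both modes. The real helper also refuses to step past POS_MAX,
 * which this toy omits.
 */
static struct bpos search_key(struct bpos pos, bool is_extents)
{
	return is_extents ? bpos_successor(pos) : pos;
}

int main(void)
{
	struct bpos keys[] = { {1, 8}, {1, 16}, {2, 0} };
	struct bpos pos = {1, 16};

	for (int extents = 0; extents <= 1; extents++) {
		struct bpos s = search_key(pos, extents);

		for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
			if (bpos_cmp(keys[i], s) >= 0) {
				printf("%s: stops at %llu:%llu\n",
				       extents ? "extents" : "non-extents",
				       (unsigned long long) keys[i].inode,
				       (unsigned long long) keys[i].offset);
				break;
			}
	}
	return 0;
}

Run as-is, this prints 1:16 for the non-extents search (>= iter->pos) and 2:0 for the extents search (> iter->pos), matching the two behaviors the commit message describes.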
This commit is contained in:
parent f2e8c69fcb
commit 9626aeb167

4 changed files with 72 additions and 106 deletions
fs/bcachefs/bset.c
@@ -1380,21 +1380,21 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b,
 {
 	if (lossy_packed_search)
 		while (m != btree_bkey_last(b, t) &&
-		       bkey_iter_cmp_p_or_unp(b, search, lossy_packed_search,
-					      m) > 0)
+		       bkey_iter_cmp_p_or_unp(b, m,
+					lossy_packed_search, search) < 0)
 			m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
 
 	if (!packed_search)
 		while (m != btree_bkey_last(b, t) &&
-		       bkey_iter_pos_cmp(b, search, m) > 0)
+		       bkey_iter_pos_cmp(b, m, search) < 0)
 			m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
 
 	if (btree_keys_expensive_checks(b)) {
 		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
 
 		BUG_ON(prev &&
-		       bkey_iter_cmp_p_or_unp(b, search, packed_search,
-					      prev) <= 0);
+		       bkey_iter_cmp_p_or_unp(b, prev,
+					packed_search, search) >= 0);
 	}
 
 	return m;
fs/bcachefs/bset.h
@@ -360,7 +360,7 @@ void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);
 static inline int bkey_cmp_p_or_unp(const struct btree *b,
 				    const struct bkey_packed *l,
 				    const struct bkey_packed *r_packed,
-				    struct bpos *r)
+				    const struct bpos *r)
 {
 	EBUG_ON(r_packed && !bkey_packed(r_packed));
 
@@ -464,7 +464,7 @@ static inline bool bch2_btree_node_iter_end(struct btree_node_iter *iter)
  * XXX: only need to compare pointers for keys that are both within a
  * btree_node_iterator - we need to break ties for prev() to work correctly
  */
-static inline int bkey_iter_cmp(struct btree *b,
+static inline int bkey_iter_cmp(const struct btree *b,
 				const struct bkey_packed *l,
 				const struct bkey_packed *r)
 {
@@ -473,7 +473,7 @@ static inline int bkey_iter_cmp(struct btree *b,
 		?: cmp_int(l, r);
 }
 
-static inline int btree_node_iter_cmp(struct btree *b,
+static inline int btree_node_iter_cmp(const struct btree *b,
 				      struct btree_node_iter_set l,
 				      struct btree_node_iter_set r)
 {
@@ -482,22 +482,22 @@ static inline int btree_node_iter_cmp(struct btree *b,
 				 __btree_node_offset_to_key(b, r.k));
 }
 
-/* These assume l (the search key) is not a deleted key: */
-static inline int bkey_iter_pos_cmp(struct btree *b,
-				    struct bpos *l,
-				    const struct bkey_packed *r)
+/* These assume r (the search key) is not a deleted key: */
+static inline int bkey_iter_pos_cmp(const struct btree *b,
+				    const struct bkey_packed *l,
+				    const struct bpos *r)
 {
-	return -bkey_cmp_left_packed(b, r, l)
-	       ?: (int) bkey_deleted(r);
+	return bkey_cmp_left_packed(b, l, r)
+	       ?: -((int) bkey_deleted(l));
 }
 
-static inline int bkey_iter_cmp_p_or_unp(struct btree *b,
-					 struct bpos *l,
-					 const struct bkey_packed *l_packed,
-					 const struct bkey_packed *r)
+static inline int bkey_iter_cmp_p_or_unp(const struct btree *b,
+					 const struct bkey_packed *l,
+					 const struct bkey_packed *r_packed,
+					 const struct bpos *r)
 {
-	return -bkey_cmp_p_or_unp(b, r, l_packed, l)
-	       ?: (int) bkey_deleted(r);
+	return bkey_cmp_p_or_unp(b, l, r_packed, r)
+	       ?: -((int) bkey_deleted(l));
 }
 
 static inline struct bkey_packed *
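Two things changed in these helpers at once: the search key moved to the right-hand argument (dropping the negated comparisons), and the tie-break now makes a deleted key (whiteout) sort before the search position, via GNU C's a ?: b conditional. A toy model of the new bkey_iter_pos_cmp(), with hypothetical flattened types (compile with GCC or Clang for the ?: extension):

#include <stdio.h>

struct key { int pos; int deleted; };

/* Three-way compare, like the kernel's cmp_int() macro. */
static int cmp_int(int l, int r) { return (l > r) - (l < r); }

/*
 * Mirrors the reworked bkey_iter_pos_cmp(): @r_pos is the search key,
 * and on a position tie a deleted key compares as smaller, so a >=
 * search steps past whiteouts sitting exactly at the search position.
 */
static int key_iter_pos_cmp(const struct key *l, int r_pos)
{
	return cmp_int(l->pos, r_pos) ?: -l->deleted;
}

int main(void)
{
	struct key keys[] = { {10, 0}, {16, 1}, {16, 0}, {20, 0} };
	int search = 16;

	/* Same shape as the search loops in bch2_bset_search_linear(). */
	for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		if (key_iter_pos_cmp(&keys[i], search) >= 0) {
			printf("stopped at pos %d (deleted=%d)\n",
			       keys[i].pos, keys[i].deleted);
			break;
		}
	return 0;
}

The loop skips the whiteout at position 16 and stops on the live key at 16 — the behavior the old helpers got by negating the comparison and adding (int) bkey_deleted(r).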
fs/bcachefs/btree_iter.c
@@ -11,10 +11,6 @@
 
 #include <linux/prefetch.h>
 
-static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *,
-						    struct btree_iter_level *,
-						    struct bkey *);
-
 #define BTREE_ITER_NO_NODE_GET_LOCKS	((struct btree *) 1)
 #define BTREE_ITER_NO_NODE_DROP		((struct btree *) 2)
 #define BTREE_ITER_NO_NODE_LOCK_ROOT	((struct btree *) 3)
@@ -29,37 +25,14 @@ static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
 		(unsigned long) iter->l[l].b >= 128;
 }
 
-/* Returns < 0 if @k is before iter pos, > 0 if @k is after */
-static inline int __btree_iter_pos_cmp(struct btree_iter *iter,
-				       const struct btree *b,
-				       const struct bkey_packed *k,
-				       bool interior_node)
+static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 {
-	int cmp = bkey_cmp_left_packed(b, k, &iter->pos);
+	struct bpos pos = iter->pos;
 
-	if (cmp)
-		return cmp;
-	if (bkey_deleted(k))
-		return -1;
-
-	/*
-	 * Normally, for extents we want the first key strictly greater than
-	 * the iterator position - with the exception that for interior nodes,
-	 * we don't want to advance past the last key if the iterator position
-	 * is POS_MAX:
-	 */
-	if (iter->flags & BTREE_ITER_IS_EXTENTS &&
-	    (!interior_node ||
-	     bkey_cmp_left_packed_byval(b, k, POS_MAX)))
-		return -1;
-	return 1;
-}
-
-static inline int btree_iter_pos_cmp(struct btree_iter *iter,
-				     const struct btree *b,
-				     const struct bkey_packed *k)
-{
-	return __btree_iter_pos_cmp(iter, b, k, b->c.level != 0);
+	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+	    bkey_cmp(pos, POS_MAX))
+		pos = bkey_successor(pos);
+	return pos;
 }
 
 /* Btree node locking: */
@@ -415,6 +388,7 @@ void bch2_trans_unlock(struct btree_trans *trans)
 static void __bch2_btree_iter_verify(struct btree_iter *iter,
 				     struct btree *b)
 {
+	struct bpos pos = btree_iter_search_key(iter);
 	struct btree_iter_level *l = &iter->l[b->c.level];
 	struct btree_node_iter tmp = l->iter;
 	struct bkey_packed *k;
@@ -437,17 +411,17 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
 	k = b->c.level || iter->flags & BTREE_ITER_IS_EXTENTS
 		? bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_discard)
 		: bch2_btree_node_iter_prev_all(&tmp, b);
-	if (k && btree_iter_pos_cmp(iter, b, k) > 0) {
+	if (k && bkey_iter_pos_cmp(b, k, &pos) >= 0) {
 		char buf[100];
 		struct bkey uk = bkey_unpack_key(b, k);
 
 		bch2_bkey_to_text(&PBUF(buf), &uk);
-		panic("prev key should be before iter pos:\n%s\n%llu:%llu\n",
+		panic("iterator should be before prev key:\n%s\n%llu:%llu\n",
 		      buf, iter->pos.inode, iter->pos.offset);
 	}
 
 	k = bch2_btree_node_iter_peek_all(&l->iter, b);
-	if (k && btree_iter_pos_cmp(iter, b, k) < 0) {
+	if (k && bkey_iter_pos_cmp(b, k, &pos) < 0) {
 		char buf[100];
 		struct bkey uk = bkey_unpack_key(b, k);
 
@@ -495,15 +469,19 @@ static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
 }
 
 static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
-					      struct btree *b,
-					      struct bkey_packed *where)
+					       struct btree *b,
+					       struct bkey_packed *where)
 {
-	struct btree_node_iter *node_iter = &iter->l[0].iter;
+	struct btree_iter_level *l = &iter->l[b->c.level];
+	struct bpos pos = btree_iter_search_key(iter);
 
-	if (where == bch2_btree_node_iter_peek_all(node_iter, b)) {
-		bkey_disassemble(b, where, &iter->k);
-		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
-	}
+	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
+		return;
+
+	if (bkey_iter_pos_cmp(l->b, where, &pos) < 0)
+		bch2_btree_node_iter_advance(&l->iter, l->b);
+
+	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 }
 
 void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
@@ -535,6 +513,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
 	bool iter_current_key_modified =
 		orig_iter_pos >= offset &&
 		orig_iter_pos <= offset + clobber_u64s;
+	struct bpos iter_pos = btree_iter_search_key(iter);
 
 	btree_node_iter_for_each(node_iter, set)
 		if (set->end == old_end)
@@ -542,7 +521,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
 
 	/* didn't find the bset in the iterator - might have to readd it: */
 	if (new_u64s &&
-	    btree_iter_pos_cmp(iter, b, where) > 0) {
+	    bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) {
 		bch2_btree_node_iter_push(node_iter, b, where, end);
 		goto fixup_done;
 	} else {
@@ -557,7 +536,7 @@ found:
 		return;
 
 	if (new_u64s &&
-	    btree_iter_pos_cmp(iter, b, where) > 0) {
+	    bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) {
 		set->k = offset;
 	} else if (set->k < offset + clobber_u64s) {
 		set->k = offset + new_u64s;
@@ -702,11 +681,12 @@ static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
 					     struct btree_iter_level *l,
 					     int max_advance)
 {
+	struct bpos pos = btree_iter_search_key(iter);
 	struct bkey_packed *k;
 	int nr_advanced = 0;
 
 	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
-	       btree_iter_pos_cmp(iter, l->b, k) < 0) {
+	       bkey_iter_pos_cmp(l->b, k, &pos) < 0) {
 		if (max_advance > 0 && nr_advanced >= max_advance)
 			return false;
 
@@ -765,13 +745,7 @@ static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
 static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
 					     struct btree *b)
 {
-	int cmp = bkey_cmp(b->key.k.p, iter->pos);
-
-	if (!cmp &&
-	    (iter->flags & BTREE_ITER_IS_EXTENTS) &&
-	    bkey_cmp(b->key.k.p, POS_MAX))
-		cmp = -1;
-	return cmp < 0;
+	return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
 }
 
 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
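The btree_iter_pos_after_node() rewrite folds the extents special case into the adjusted search key. A small exhaustive check over a toy position space (a sketch, not kernel code; MAX stands in for POS_MAX) shows why the two formulations agree, including the POS_MAX guard:

#include <stdbool.h>
#include <stdio.h>

#define MAX 7 /* stands in for POS_MAX */

static int cmp(int l, int r) { return (l > r) - (l < r); }

/* The old logic: special-case an exact match for extents. */
static bool after_node_old(int node_max, int pos, bool is_extents)
{
	int c = cmp(node_max, pos);

	if (!c && is_extents && cmp(node_max, MAX))
		c = -1;
	return c < 0;
}

/* The new logic: compare against the adjusted search key. */
static bool after_node_new(int node_max, int pos, bool is_extents)
{
	int search = (is_extents && pos != MAX) ? pos + 1 : pos;

	return cmp(node_max, search) < 0;
}

int main(void)
{
	for (int node_max = 0; node_max <= MAX; node_max++)
		for (int pos = 0; pos <= MAX; pos++)
			for (int e = 0; e <= 1; e++)
				if (after_node_old(node_max, pos, e) !=
				    after_node_new(node_max, pos, e))
					printf("mismatch: %d %d %d\n",
					       node_max, pos, e);
	printf("old and new agree on every case\n");
	return 0;
}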
@@ -785,16 +759,10 @@ static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
 static inline void __btree_iter_init(struct btree_iter *iter,
 				     unsigned level)
 {
+	struct bpos pos = btree_iter_search_key(iter);
 	struct btree_iter_level *l = &iter->l[level];
 
-	bch2_btree_node_iter_init(&l->iter, l->b, &iter->pos);
-
-	if (iter->flags & BTREE_ITER_IS_EXTENTS)
-		btree_iter_advance_to_pos(iter, l, -1);
-
-	/* Skip to first non whiteout: */
-	if (level)
-		bch2_btree_node_iter_peek(&l->iter, l->b);
+	bch2_btree_node_iter_init(&l->iter, l->b, &pos);
 
 	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 }
@@ -1564,9 +1532,7 @@ __bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
 	int ret;
 
 recheck:
-	while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
-	       bkey_cmp(k.k->p, iter->pos) <= 0)
-		bch2_btree_node_iter_advance(&l->iter, l->b);
+	btree_iter_advance_to_pos(iter, l, -1);
 
 	/*
 	 * iterator is now at the correct position for inserting at iter->pos,
@@ -1575,9 +1541,27 @@ recheck:
 	 */
 
 	node_iter = l->iter;
-	if (k.k && bkey_whiteout(k.k))
-		k = __btree_iter_unpack(iter, l, &iter->k,
-			bch2_btree_node_iter_peek(&node_iter, l->b));
+	k = __btree_iter_unpack(iter, l, &iter->k,
+		bch2_btree_node_iter_peek(&node_iter, l->b));
+
+	if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
+		/*
+		 * If there wasn't actually a hole, want the iterator to be
+		 * pointed at the key we found:
+		 *
+		 * XXX: actually, we shouldn't be changing the iterator here:
+		 * the iterator needs to be correct for inserting at iter->pos,
+		 * and there may be whiteouts between iter->pos and what this
+		 * iterator points at:
+		 */
+		l->iter = node_iter;
+
+		EBUG_ON(bkey_cmp(k.k->p, iter->pos) <= 0);
+		iter->uptodate = BTREE_ITER_UPTODATE;
+
+		__bch2_btree_iter_verify(iter, l->b);
+		return k;
+	}
 
 	/*
 	 * If we got to the end of the node, check if we need to traverse to the
@@ -1592,24 +1576,6 @@ recheck:
 		goto recheck;
 	}
 
-	if (k.k &&
-	    !bkey_whiteout(k.k) &&
-	    bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
-		/*
-		 * if we skipped forward to find the first non whiteout and
-		 * there _wasn't_ actually a hole, we want the iterator to be
-		 * pointed at the key we found:
-		 */
-		l->iter = node_iter;
-
-		EBUG_ON(bkey_cmp(k.k->p, iter->pos) < 0);
-		EBUG_ON(bkey_deleted(k.k));
-		iter->uptodate = BTREE_ITER_UPTODATE;
-
-		__bch2_btree_iter_verify(iter, l->b);
-		return k;
-	}
-
 	/* hole */
 
 	/* holes can't span inode numbers: */
fs/bcachefs/btree_update_interior.c
@@ -1191,7 +1191,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
 				     BTREE_TRIGGER_GC);
 
 	while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
-	       bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
+	       bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
 		bch2_btree_node_iter_advance(node_iter, b);
 
 	/*