dm vdo vio-pool: add a pool pointer to pooled_vio
This allows us to simplify the return_vio_to_pool interface. Also, we don't need to use vdo_forget on local variables or arguments that are about to go out of scope anyway.

Signed-off-by: Ken Raeburn <raeburn@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
parent 148a9cec84
commit 2b515cea77

4 changed files with 19 additions and 17 deletions
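The API change in a nutshell: each pooled_vio now carries a back-pointer to the vio_pool it was allocated from, so return_vio_to_pool() can locate the pool from the vio itself and callers no longer have to thread a pool pointer through every completion path. Below is a minimal stand-alone C sketch of this back-pointer pattern; the toy_* names are illustrative only, not VDO code:

    #include <stdio.h>

    struct toy_pool;

    /* A pooled object that remembers which pool it came from. */
    struct toy_obj {
    	struct toy_pool *pool;	/* back-pointer, set once when the pool is built */
    	int id;
    };

    struct toy_pool {
    	struct toy_obj objs[2];
    	int available;
    };

    static void toy_pool_init(struct toy_pool *pool)
    {
    	for (int i = 0; i < 2; i++) {
    		pool->objs[i].pool = pool;
    		pool->objs[i].id = i;
    	}
    	pool->available = 2;
    }

    /*
     * Old shape: toy_return(struct toy_pool *pool, struct toy_obj *obj);
     * new shape: the object alone is enough to locate its pool.
     */
    static void toy_return(struct toy_obj *obj)
    {
    	struct toy_pool *pool = obj->pool;

    	pool->available++;
    	printf("returned obj %d; %d now available\n", obj->id, pool->available);
    }

    int main(void)
    {
    	struct toy_pool pool;

    	toy_pool_init(&pool);
    	pool.available--;	/* pretend objs[0] was acquired */
    	toy_return(&pool.objs[0]);
    	return 0;
    }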
drivers/md/dm-vdo/block-map.c
@@ -1544,7 +1544,7 @@ static void write_page_if_not_dirtied(struct vdo_waiter *waiter, void *context)
 
 static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio)
 {
-	return_vio_to_pool(zone->vio_pool, vio);
+	return_vio_to_pool(vio);
 	check_for_drain_complete(zone);
 }
 
@@ -1837,7 +1837,7 @@ static void finish_block_map_page_load(struct vdo_completion *completion)
 
 	if (!vdo_copy_valid_page(vio->data, nonce, pbn, page))
 		vdo_format_block_map_page(page, nonce, pbn, false);
-	return_vio_to_pool(zone->vio_pool, pooled);
+	return_vio_to_pool(pooled);
 
 	/* Release our claim to the load and wake any waiters */
 	release_page_lock(data_vio, "load");
@@ -1851,10 +1851,9 @@ static void handle_io_error(struct vdo_completion *completion)
 	struct vio *vio = as_vio(completion);
 	struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
 	struct data_vio *data_vio = completion->parent;
-	struct block_map_zone *zone = pooled->context;
 
 	vio_record_metadata_io_error(vio);
-	return_vio_to_pool(zone->vio_pool, pooled);
+	return_vio_to_pool(pooled);
 	abort_load(data_vio, result);
 }
 
@@ -2499,7 +2498,7 @@ static void finish_cursor(struct cursor *cursor)
 	struct cursors *cursors = cursor->parent;
 	struct vdo_completion *completion = cursors->completion;
 
-	return_vio_to_pool(cursors->pool, vdo_forget(cursor->vio));
+	return_vio_to_pool(vdo_forget(cursor->vio));
 	if (--cursors->active_roots > 0)
 		return;
 
drivers/md/dm-vdo/slab-depot.c
@@ -414,8 +414,7 @@ static void complete_reaping(struct vdo_completion *completion)
 {
 	struct slab_journal *journal = completion->parent;
 
-	return_vio_to_pool(journal->slab->allocator->vio_pool,
-			   vio_as_pooled_vio(as_vio(vdo_forget(completion))));
+	return_vio_to_pool(vio_as_pooled_vio(as_vio(completion)));
 	finish_reaping(journal);
 	reap_slab_journal(journal);
 }
@@ -698,7 +697,7 @@ static void complete_write(struct vdo_completion *completion)
 	sequence_number_t committed = get_committing_sequence_number(pooled);
 
 	list_del_init(&pooled->list_entry);
-	return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled));
+	return_vio_to_pool(pooled);
 
 	if (result != VDO_SUCCESS) {
 		vio_record_metadata_io_error(as_vio(completion));
@@ -1076,7 +1075,7 @@ static void finish_reference_block_write(struct vdo_completion *completion)
 	/* Release the slab journal lock. */
 	adjust_slab_journal_block_reference(&slab->journal,
 					    block->slab_journal_lock_to_release, -1);
-	return_vio_to_pool(slab->allocator->vio_pool, pooled);
+	return_vio_to_pool(pooled);
 
 	/*
 	 * We can't clear the is_writing flag earlier as releasing the slab journal lock may cause
@@ -1170,7 +1169,7 @@ static void handle_io_error(struct vdo_completion *completion)
 	struct vdo_slab *slab = ((struct reference_block *) completion->parent)->slab;
 
 	vio_record_metadata_io_error(vio);
-	return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+	return_vio_to_pool(vio_as_pooled_vio(vio));
 	slab->active_count--;
 	vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
 	check_if_slab_drained(slab);
@@ -2242,7 +2241,7 @@ static void finish_reference_block_load(struct vdo_completion *completion)
 	struct vdo_slab *slab = block->slab;
 
 	unpack_reference_block((struct packed_reference_block *) vio->data, block);
-	return_vio_to_pool(slab->allocator->vio_pool, pooled);
+	return_vio_to_pool(pooled);
 	slab->active_count--;
 	clear_provisional_references(block);
 
@@ -2429,7 +2428,7 @@ static void finish_loading_journal(struct vdo_completion *completion)
 		initialize_journal_state(journal);
 	}
 
-	return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+	return_vio_to_pool(vio_as_pooled_vio(vio));
 	vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab));
 }
 
@@ -2449,7 +2448,7 @@ static void handle_load_error(struct vdo_completion *completion)
 	struct vio *vio = as_vio(completion);
 
 	vio_record_metadata_io_error(vio);
-	return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+	return_vio_to_pool(vio_as_pooled_vio(vio));
 	vdo_finish_loading_with_result(&journal->slab->state, result);
 }
 
drivers/md/dm-vdo/vio.c
@@ -345,6 +345,7 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
 	}
 
 	pooled->context = context;
+	pooled->pool = pool;
 	list_add_tail(&pooled->pool_entry, &pool->available);
 }
 
@@ -419,12 +420,13 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
 }
 
 /**
- * return_vio_to_pool() - Return a vio to the pool
- * @pool: The vio pool.
+ * return_vio_to_pool() - Return a vio to its pool
  * @vio: The pooled vio to return.
  */
-void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
+void return_vio_to_pool(struct pooled_vio *vio)
 {
+	struct vio_pool *pool = vio->pool;
+
 	VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
 			    "vio pool entry returned on same thread as it was acquired");
 
drivers/md/dm-vdo/vio.h
@@ -30,6 +30,8 @@ struct pooled_vio {
 	void *context;
 	/* The list entry used by the pool */
 	struct list_head pool_entry;
+	/* The pool this vio is allocated from */
+	struct vio_pool *pool;
 };
 
 /**
@@ -194,6 +196,6 @@ int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t th
 void free_vio_pool(struct vio_pool *pool);
 bool __must_check is_vio_pool_busy(struct vio_pool *pool);
 void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
-void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio);
+void return_vio_to_pool(struct pooled_vio *vio);
 
 #endif /* VIO_H */