io_uring: introduce io_cache_free() helper

Add a helper function io_cache_free() that returns an allocation to an
io_alloc_cache, falling back on kfree() if the io_alloc_cache is full.
This is the inverse of io_cache_alloc(), which takes an allocation from
an io_alloc_cache and falls back on kmalloc() if the cache is empty.
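
For reference, here is the pair side by side as it reads after this
change (the io_cache_alloc() body is quoted from io_uring/alloc_cache.h
as of this tree; only io_cache_free() is new):

static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
	void *obj;

	/* Reuse a cached allocation when one is available... */
	obj = io_alloc_cache_get(cache);
	if (obj)
		return obj;
	/* ...otherwise fall back on a fresh kmalloc()-backed allocation */
	return io_cache_alloc_new(cache, gfp);
}

static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
{
	/* Return the allocation to the cache, or kfree() it if the cache is full */
	if (!io_alloc_cache_put(cache, obj))
		kfree(obj);
}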

Convert 4 callers to use the helper.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Suggested-by: Li Zetao <lizetao1@huawei.com>
Link: https://lore.kernel.org/r/20250304194814.2346705-1-csander@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Caleb Sander Mateos, 2025-03-04 12:48:12 -07:00
Committer: Jens Axboe
Commit: 0d83b8a9f1 (parent: fe21a4532e)
4 changed files with 13 additions and 15 deletions

diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h

@@ -68,4 +68,10 @@ static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
 	return io_cache_alloc_new(cache, gfp);
 }
 
+static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
+{
+	if (!io_alloc_cache_put(cache, obj))
+		kfree(obj);
+}
+
 #endif
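
A minimal sketch of the calling pattern the conversions below follow;
struct foo, the foo_cache field, and the function names are hypothetical
stand-ins for illustration, not code from this commit:

/* Hypothetical caller pairing the two helpers */
static int foo_get(struct io_ring_ctx *ctx, struct foo **out)
{
	/* Pops a cached object, or kmalloc()s a new one if the cache is empty */
	struct foo *f = io_cache_alloc(&ctx->foo_cache, GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	*out = f;
	return 0;
}

static void foo_put(struct io_ring_ctx *ctx, struct foo *f)
{
	/* Pushes the object back into the cache, or kfree()s it if full */
	io_cache_free(&ctx->foo_cache, f);
}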

diff --git a/io_uring/futex.c b/io_uring/futex.c
--- a/io_uring/futex.c
+++ b/io_uring/futex.c

@@ -53,12 +53,10 @@ static void __io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)
 
 static void io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)
 {
-	struct io_futex_data *ifd = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
 
 	io_tw_lock(ctx, tw);
-	if (!io_alloc_cache_put(&ctx->futex_cache, ifd))
-		kfree(ifd);
+	io_cache_free(&ctx->futex_cache, req->async_data);
 	__io_futex_complete(req, tw);
 }
 

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c

@@ -1422,8 +1422,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
 
 				if (apoll->double_poll)
 					kfree(apoll->double_poll);
-				if (!io_alloc_cache_put(&ctx->apoll_cache, apoll))
-					kfree(apoll);
+				io_cache_free(&ctx->apoll_cache, apoll);
 				req->flags &= ~REQ_F_POLLED;
 			}
 			if (req->flags & IO_REQ_LINK_FLAGS)

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c

@@ -124,8 +124,9 @@ static struct io_mapped_ubuf *io_alloc_imu(struct io_ring_ctx *ctx,
 
 static void io_free_imu(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
 {
-	if (imu->nr_bvecs > IO_CACHED_BVECS_SEGS ||
-	    !io_alloc_cache_put(&ctx->imu_cache, imu))
+	if (imu->nr_bvecs <= IO_CACHED_BVECS_SEGS)
+		io_cache_free(&ctx->imu_cache, imu);
+	else
 		kvfree(imu);
 }
 
@ -487,12 +488,6 @@ int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}
static void io_free_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
if (!io_alloc_cache_put(&ctx->node_cache, node))
kfree(node);
}
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
if (node->tag)
@@ -510,7 +505,7 @@ void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
 		break;
 	}
 
-	io_free_node(ctx, node);
+	io_cache_free(&ctx->node_cache, node);
 }
 
 int io_sqe_files_unregister(struct io_ring_ctx *ctx)
@@ -835,7 +830,7 @@ done:
 	if (ret) {
 		if (imu)
 			io_free_imu(ctx, imu);
-		io_free_node(ctx, node);
+		io_cache_free(&ctx->node_cache, node);
 		node = ERR_PTR(ret);
 	}
 	kvfree(pages);