io_uring/kbuf: remove legacy kbuf bulk allocation

Legacy provided buffers are slow and discouraged in favour of the ring
variant. Remove the bulk allocation to keep the code simpler, as we don't
care about performance here.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/a064d70370e590efed8076e9501ae4cfc20fe0ca.1738724373.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

commit 7919292a96 (parent 92a3bac9a5)
Author:    Pavel Begunkov
Committer: Jens Axboe
Date:      2025-02-05 11:36:42 +00:00

--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -501,12 +501,9 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	return 0;
 }
 
-#define IO_BUFFER_ALLOC_BATCH 64
-
 static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
 {
-	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
-	int allocated;
+	struct io_buffer *buf;
 
 	/*
 	 * Completions that don't happen inline (eg not under uring_lock) will
@@ -524,27 +521,10 @@ static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
 		spin_unlock(&ctx->completion_lock);
 	}
 
-	/*
-	 * No free buffers and no completion entries either. Allocate a new
-	 * batch of buffer entries and add those to our freelist.
-	 */
-	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
-					  ARRAY_SIZE(bufs), (void **) bufs);
-	if (unlikely(!allocated)) {
-		/*
-		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
-		 * retry single alloc to be on the safe side.
-		 */
-		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
-		if (!bufs[0])
-			return -ENOMEM;
-		allocated = 1;
-	}
-
-	while (allocated)
-		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);
-
+	buf = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	list_add_tail(&buf->list, &ctx->io_buffers_cache);
 	return 0;
 }
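
For context, the difference between the removed and the remaining refill
strategy can be illustrated outside the kernel. The listing below is a
hypothetical user-space analogue using malloc and a singly linked free
list; refill_cache_bulk, refill_cache_single, ALLOC_BATCH and struct buf
are illustrative names only and do not appear in kbuf.c.

/*
 * Hypothetical user-space analogue of the two refill strategies; names
 * and types here are illustrative only.
 */
#include <stdlib.h>
#include <stdio.h>

struct buf {
	struct buf *next;
};

#define ALLOC_BATCH 64

/* Old approach: allocate a whole batch and push it onto the free list. */
static int refill_cache_bulk(struct buf **cache)
{
	struct buf *bufs[ALLOC_BATCH];
	int i, allocated = 0;

	for (i = 0; i < ALLOC_BATCH; i++) {
		bufs[i] = malloc(sizeof(*bufs[i]));
		if (!bufs[i])
			break;
		allocated++;
	}
	if (!allocated)
		return -1;
	while (allocated) {
		bufs[--allocated]->next = *cache;
		*cache = bufs[allocated];
	}
	return 0;
}

/* New approach: allocate a single object per refill call, as in the patch. */
static int refill_cache_single(struct buf **cache)
{
	struct buf *buf = malloc(sizeof(*buf));

	if (!buf)
		return -1;
	buf->next = *cache;
	*cache = buf;
	return 0;
}

int main(void)
{
	struct buf *cache = NULL;
	int ret = refill_cache_bulk(&cache) || refill_cache_single(&cache);

	while (cache) {
		struct buf *next = cache->next;

		free(cache);
		cache = next;
	}
	printf("refill %s\n", ret ? "failed" : "succeeded");
	return ret;
}

Note that the kernel's kmem_cache_alloc_bulk() is all-or-nothing, which is
why the removed code fell back to a single kmem_cache_alloc() when the
batch failed; the patch keeps only that single-allocation path.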