fuse: convert direct io to use folios
Convert direct io requests to use folios instead of pages. No functional changes.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
parent 7fce207af5
commit 3b97c3652d
2 changed files with 36 additions and 68 deletions
fs/fuse/file.c

@@ -665,11 +665,11 @@ static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres,
 {
        unsigned int i;

-       for (i = 0; i < ap->num_pages; i++) {
+       for (i = 0; i < ap->num_folios; i++) {
                if (should_dirty)
-                       set_page_dirty_lock(ap->pages[i]);
+                       folio_mark_dirty_lock(ap->folios[i]);
                if (ap->args.is_pinned)
-                       unpin_user_page(ap->pages[i]);
+                       unpin_folio(ap->folios[i]);
        }

        if (nres > 0 && ap->args.invalidate_vmap)
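For readers less used to the folio API, the substitutions in the hunk above are one-to-one replacements of the old page calls, with page_folio() bridging code that still holds a struct page. A minimal sketch of that mapping (the helper name release_one_page() is hypothetical, for illustration only):

#include <linux/mm.h>

/* Hypothetical helper: release one extracted page the way the loop above does. */
static void release_one_page(struct page *page, bool should_dirty, bool pinned)
{
        struct folio *folio = page_folio(page);   /* page -> owning folio */

        if (should_dirty)
                folio_mark_dirty_lock(folio);     /* was set_page_dirty_lock(page) */
        if (pinned)
                unpin_folio(folio);               /* was unpin_user_page(page) */
}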
@@ -742,24 +742,6 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
                kref_put(&io->refcnt, fuse_io_release);
 }

-static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
-                                         unsigned int npages)
-{
-       struct fuse_io_args *ia;
-
-       ia = kzalloc(sizeof(*ia), GFP_KERNEL);
-       if (ia) {
-               ia->io = io;
-               ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
-                                               &ia->ap.descs);
-               if (!ia->ap.pages) {
-                       kfree(ia);
-                       ia = NULL;
-               }
-       }
-       return ia;
-}
-
 static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io,
                                                 unsigned int nfolios)
 {
@@ -779,12 +761,6 @@ static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io,
        return ia;
 }

-static void fuse_io_free(struct fuse_io_args *ia)
-{
-       kfree(ia->ap.pages);
-       kfree(ia);
-}
-
 static void fuse_io_folios_free(struct fuse_io_args *ia)
 {
        kfree(ia->ap.folios);
@@ -821,7 +797,7 @@ static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
                fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty);

        fuse_aio_complete(io, err, pos);
-       fuse_io_free(ia);
+       fuse_io_folios_free(ia);
 }

 static ssize_t fuse_async_req_send(struct fuse_mount *fm,
@@ -1531,6 +1507,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
                               bool use_pages_for_kvec_io)
 {
        bool flush_or_invalidate = false;
+       unsigned int nr_pages = 0;
        size_t nbytes = 0;  /* # bytes already packed in req */
        ssize_t ret = 0;

|
@ -1560,15 +1537,23 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
while (nbytes < *nbytesp && ap->num_pages < max_pages) {
|
/*
|
||||||
unsigned npages;
|
* Until there is support for iov_iter_extract_folios(), we have to
|
||||||
size_t start;
|
* manually extract pages using iov_iter_extract_pages() and then
|
||||||
struct page **pt_pages;
|
* copy that to a folios array.
|
||||||
|
*/
|
||||||
|
struct page **pages = kzalloc(max_pages * sizeof(struct page *),
|
||||||
|
GFP_KERNEL);
|
||||||
|
if (!pages)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
pt_pages = &ap->pages[ap->num_pages];
|
while (nbytes < *nbytesp && nr_pages < max_pages) {
|
||||||
ret = iov_iter_extract_pages(ii, &pt_pages,
|
unsigned nfolios, i;
|
||||||
|
size_t start;
|
||||||
|
|
||||||
|
ret = iov_iter_extract_pages(ii, &pages,
|
||||||
*nbytesp - nbytes,
|
*nbytesp - nbytes,
|
||||||
max_pages - ap->num_pages,
|
max_pages - nr_pages,
|
||||||
0, &start);
|
0, &start);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
break;
|
break;
|
||||||
|
@@ -1576,15 +1561,20 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
                nbytes += ret;

                ret += start;
-               npages = DIV_ROUND_UP(ret, PAGE_SIZE);
+               /* Currently, all folios in FUSE are one page */
+               nfolios = DIV_ROUND_UP(ret, PAGE_SIZE);

-               ap->descs[ap->num_pages].offset = start;
-               fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
+               ap->folio_descs[ap->num_folios].offset = start;
+               fuse_folio_descs_length_init(ap->folio_descs, ap->num_folios, nfolios);
+               for (i = 0; i < nfolios; i++)
+                       ap->folios[i + ap->num_folios] = page_folio(pages[i]);

-               ap->num_pages += npages;
-               ap->descs[ap->num_pages - 1].length -=
+               ap->num_folios += nfolios;
+               ap->folio_descs[ap->num_folios - 1].length -=
                        (PAGE_SIZE - ret) & (PAGE_SIZE - 1);
+               nr_pages += nfolios;
        }
+       kfree(pages);

        if (write && flush_or_invalidate)
                flush_kernel_vmap_range(ap->args.vmap_base, nbytes);
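Condensing the two hunks above: until iov_iter_extract_folios() exists, pages are pulled out of the iterator with iov_iter_extract_pages() into a temporary array and each one is then recorded as a single-page folio. A rough standalone sketch of that pattern, with the hypothetical helper name extract_iter_folios() and the FUSE descriptor bookkeeping left out:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

/*
 * Illustrative sketch: extract up to @max_pages pages (at most @max_bytes
 * bytes) from @ii and store them in @folios as single-page folios.
 * Returns the number of folios filled in, or a negative errno.
 */
static int extract_iter_folios(struct iov_iter *ii, struct folio **folios,
                               unsigned int max_pages, size_t max_bytes)
{
        struct page **pages;
        unsigned int i, nfolios;
        size_t start;
        ssize_t ret;

        /* Temporary page array, mirroring the diff above. */
        pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = iov_iter_extract_pages(ii, &pages, max_bytes, max_pages, 0, &start);
        if (ret < 0) {
                kfree(pages);
                return ret;
        }

        /* ret is in bytes; with one-page folios, one extracted page == one folio. */
        nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);
        for (i = 0; i < nfolios; i++)
                folios[i] = page_folio(pages[i]);

        kfree(pages);
        return nfolios;
}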
@@ -1624,14 +1614,14 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
        bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;

        max_pages = iov_iter_npages(iter, fc->max_pages);
-       ia = fuse_io_alloc(io, max_pages);
+       ia = fuse_io_folios_alloc(io, max_pages);
        if (!ia)
                return -ENOMEM;

        if (fopen_direct_io && fc->direct_io_allow_mmap) {
                res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
                if (res) {
-                       fuse_io_free(ia);
+                       fuse_io_folios_free(ia);
                        return res;
                }
        }
@@ -1646,7 +1636,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
        if (fopen_direct_io && write) {
                res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
                if (res) {
-                       fuse_io_free(ia);
+                       fuse_io_folios_free(ia);
                        return res;
                }
        }
@@ -1673,7 +1663,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,

                if (!io->async || nres < 0) {
                        fuse_release_user_pages(&ia->ap, nres, io->should_dirty);
-                       fuse_io_free(ia);
+                       fuse_io_folios_free(ia);
                }
                ia = NULL;
                if (nres < 0) {
@@ -1692,13 +1682,13 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        }
                        if (count) {
                                max_pages = iov_iter_npages(iter, fc->max_pages);
-                               ia = fuse_io_alloc(io, max_pages);
+                               ia = fuse_io_folios_alloc(io, max_pages);
                                if (!ia)
                                        break;
                        }
                }
        }
        if (ia)
-               fuse_io_free(ia);
+               fuse_io_folios_free(ia);
        if (res > 0)
                *ppos = pos;

fs/fuse/fuse_i.h

@@ -1017,18 +1017,6 @@ static inline bool fuse_is_bad(struct inode *inode)
        return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state));
 }

-static inline struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
-                                            struct fuse_page_desc **desc)
-{
-       struct page **pages;
-
-       pages = kzalloc(npages * (sizeof(struct page *) +
-                                 sizeof(struct fuse_page_desc)), flags);
-       *desc = (void *) (pages + npages);
-
-       return pages;
-}
-
 static inline struct folio **fuse_folios_alloc(unsigned int nfolios, gfp_t flags,
                                               struct fuse_folio_desc **desc)
 {
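The removed fuse_pages_alloc() carved a single kzalloc() into the page-pointer array plus the per-page descriptor array; the surviving fuse_folios_alloc() (whose body is cut off in this hunk) presumably keeps the same layout for folios. A sketch under that assumption, with a hypothetical local name:

/*
 * Illustrative layout (assumed to mirror the removed page-based helper):
 *
 *   [ struct folio * x nfolios ][ struct fuse_folio_desc x nfolios ]
 *   ^ returned pointer           ^ *desc
 */
static inline struct folio **folios_alloc_sketch(unsigned int nfolios, gfp_t flags,
                                                 struct fuse_folio_desc **desc)
{
        struct folio **folios;

        folios = kzalloc(nfolios * (sizeof(struct folio *) +
                                    sizeof(struct fuse_folio_desc)), flags);
        if (folios)
                *desc = (void *)(folios + nfolios);

        return folios;
}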
@@ -1041,16 +1029,6 @@ static inline struct folio **fuse_folios_alloc(unsigned int nfolios, gfp_t flags
        return folios;
 }

-static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
-                                              unsigned int index,
-                                              unsigned int nr_pages)
-{
-       int i;
-
-       for (i = index; i < index + nr_pages; i++)
-               descs[i].length = PAGE_SIZE - descs[i].offset;
-}
-
 static inline void fuse_folio_descs_length_init(struct fuse_folio_desc *descs,
                                                unsigned int index,
                                                unsigned int nr_folios)
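The folio descriptor initializer above (its body is cut off) presumably mirrors the removed page-based helper: each descriptor spans the rest of its page after its starting offset, and the caller in fuse_get_user_pages() then trims the final descriptor with (PAGE_SIZE - ret) & (PAGE_SIZE - 1). A short worked example under that assumption, using a hypothetical name and assuming fuse_folio_desc carries offset/length fields like fuse_page_desc did:

/*
 * Worked example (PAGE_SIZE = 4096): 9000 bytes extracted starting at
 * offset 768 into the first page, so ret = 9000 + 768 = 9768 and
 * nfolios = DIV_ROUND_UP(9768, 4096) = 3.  After init:
 *
 *   descs[0] = { .offset = 768, .length = 4096 - 768 = 3328 }
 *   descs[1] = { .offset = 0,   .length = 4096 }
 *   descs[2] = { .offset = 0,   .length = 4096 }
 *
 * The caller's trim subtracts (4096 - 9768) & 4095 = 2520 bytes from
 * descs[2], leaving 1576, and 3328 + 4096 + 1576 = 9000 bytes total.
 */
static inline void folio_descs_length_init_sketch(struct fuse_folio_desc *descs,
                                                  unsigned int index,
                                                  unsigned int nr_folios)
{
        unsigned int i;

        for (i = index; i < index + nr_folios; i++)
                descs[i].length = PAGE_SIZE - descs[i].offset;
}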