xfs: cleanup mapping tmpfs folios into the buffer cache
Directly assign b_addr based on the tmpfs folios without a detour through
pages, reuse the folio_put path used for non-tmpfs buffers and replace all
references to pages in comments with folios.

Partially based on a patch from Dave Chinner <dchinner@redhat.com>.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
parent e2874632a6
commit e614a00117
3 changed files with 14 additions and 32 deletions
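Before the hunks, here is a rough sketch of what the mapping helper looks like once the patch is applied, stitched together only from the diff context shown below. The shmem folio lookup in the middle of the function is not part of any hunk and is left as a placeholder comment, and the trailing folio_put() branch of xfs_buf_free() is inferred from the commit message ("reuse the folio_put path used for non-tmpfs buffers") rather than shown, so treat this as an illustration of the end state, not a verbatim copy of the tree.

/* Sketch (illustrative): mapping side in fs/xfs/xfs_buf_mem.c after the patch. */
int
xmbuf_map_backing_mem(
        struct xfs_buf          *bp)
{
        struct inode            *inode = file_inode(bp->b_target->bt_file);
        struct folio            *folio = NULL;
        loff_t                  pos = BBTOB(xfs_buf_daddr(bp));

        /* ... shmem folio lookup (uses inode/pos) and error handling,
         * unchanged and not shown in the hunks below ... */

        /*
         * Mark the folio dirty so that it won't be reclaimed once we drop the
         * (potentially last) reference in xfs_buf_free.
         */
        folio_set_dirty(folio);
        folio_unlock(folio);

        /* b_addr comes straight from the folio, no folio_file_page() detour. */
        bp->b_addr = folio_address(folio);
        return 0;
}

/* Sketch (illustrative): freeing side in xfs_buf_free(); the final branch is
 * assumed from the commit message, not visible in the hunk below. */
        if (is_vmalloc_addr(bp->b_addr))
                vfree(bp->b_addr);
        else if (bp->b_flags & _XBF_KMEM)
                kfree(bp->b_addr);
        else
                folio_put(virt_to_folio(bp->b_addr));   /* tmpfs folios now go here too */

The net effect is that xfs_buf_free() no longer needs an xmbuf_unmap_page() special case: a memory-backed buffer drops its folio reference through the same folio_put() path as every other folio-backed buffer.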
fs/xfs/xfs_buf.c

@@ -206,9 +206,7 @@ xfs_buf_free(
         if (!xfs_buftarg_is_mem(bp->b_target) && size >= PAGE_SIZE)
                 mm_account_reclaimed_pages(howmany(size, PAGE_SHIFT));
 
-        if (xfs_buftarg_is_mem(bp->b_target))
-                xmbuf_unmap_page(bp);
-        else if (is_vmalloc_addr(bp->b_addr))
+        if (is_vmalloc_addr(bp->b_addr))
                 vfree(bp->b_addr);
         else if (bp->b_flags & _XBF_KMEM)
                 kfree(bp->b_addr);
@@ -275,7 +273,7 @@ xfs_buf_alloc_backing_mem(
         struct folio    *folio;
 
         if (xfs_buftarg_is_mem(bp->b_target))
-                return xmbuf_map_page(bp);
+                return xmbuf_map_backing_mem(bp);
 
         /* Assure zeroed buffer for non-read cases. */
         if (!(flags & XBF_READ))
fs/xfs/xfs_buf_mem.c

@@ -74,7 +74,7 @@ xmbuf_alloc(
 
         /*
          * We don't want to bother with kmapping data during repair, so don't
-         * allow highmem pages to back this mapping.
+         * allow highmem folios to back this mapping.
          */
         mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
 
@@ -127,14 +127,13 @@ xmbuf_free(
         kfree(btp);
 }
 
-/* Directly map a shmem page into the buffer cache. */
+/* Directly map a shmem folio into the buffer cache. */
 int
-xmbuf_map_page(
+xmbuf_map_backing_mem(
         struct xfs_buf          *bp)
 {
         struct inode            *inode = file_inode(bp->b_target->bt_file);
         struct folio            *folio = NULL;
-        struct page             *page;
         loff_t                  pos = BBTOB(xfs_buf_daddr(bp));
         int                     error;
 
@@ -159,30 +158,17 @@ xmbuf_map_page(
                 return -EIO;
         }
 
-        page = folio_file_page(folio, pos >> PAGE_SHIFT);
-
         /*
-         * Mark the page dirty so that it won't be reclaimed once we drop the
-         * (potentially last) reference in xmbuf_unmap_page.
+         * Mark the folio dirty so that it won't be reclaimed once we drop the
+         * (potentially last) reference in xfs_buf_free.
          */
-        set_page_dirty(page);
-        unlock_page(page);
+        folio_set_dirty(folio);
+        folio_unlock(folio);
 
-        bp->b_addr = page_address(page);
+        bp->b_addr = folio_address(folio);
         return 0;
 }
 
-/* Unmap a shmem page that was mapped into the buffer cache. */
-void
-xmbuf_unmap_page(
-        struct xfs_buf          *bp)
-{
-        ASSERT(xfs_buftarg_is_mem(bp->b_target));
-
-        put_page(virt_to_page(bp->b_addr));
-        bp->b_addr = NULL;
-}
-
 /* Is this a valid daddr within the buftarg? */
 bool
 xmbuf_verify_daddr(
@@ -196,7 +182,7 @@ xmbuf_verify_daddr(
         return daddr < (inode->i_sb->s_maxbytes >> BBSHIFT);
 }
 
-/* Discard the page backing this buffer. */
+/* Discard the folio backing this buffer. */
 static void
 xmbuf_stale(
         struct xfs_buf          *bp)
@@ -211,7 +197,7 @@ xmbuf_stale(
 }
 
 /*
- * Finalize a buffer -- discard the backing page if it's stale, or run the
+ * Finalize a buffer -- discard the backing folio if it's stale, or run the
  * write verifier to detect problems.
  */
 int
fs/xfs/xfs_buf_mem.h

@@ -19,16 +19,14 @@ int xmbuf_alloc(struct xfs_mount *mp, const char *descr,
                 struct xfs_buftarg **btpp);
 void xmbuf_free(struct xfs_buftarg *btp);
 
-int xmbuf_map_page(struct xfs_buf *bp);
-void xmbuf_unmap_page(struct xfs_buf *bp);
 bool xmbuf_verify_daddr(struct xfs_buftarg *btp, xfs_daddr_t daddr);
 void xmbuf_trans_bdetach(struct xfs_trans *tp, struct xfs_buf *bp);
 int xmbuf_finalize(struct xfs_buf *bp);
 #else
 # define xfs_buftarg_is_mem(...)        (false)
-# define xmbuf_map_page(...)            (-ENOMEM)
-# define xmbuf_unmap_page(...)          ((void)0)
 # define xmbuf_verify_daddr(...)        (false)
 #endif /* CONFIG_XFS_MEMORY_BUFS */
 
+int xmbuf_map_backing_mem(struct xfs_buf *bp);
+
 #endif /* __XFS_BUF_MEM_H__ */