Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
Fixes for 5.10-rc3:
- Fix an uninitialized struct problem.
- Fix an iomap problem zeroing unwritten EOF blocks.
- Fix some clumsy error handling when writeback fails on blocksize < pagesize filesystems.
- Fix a retry loop not resetting loop variables properly.
- Fix scrub flagging rtinherit inodes on a non-rt fs, since the kernel actually does permit that combination.
- Fix excessive page cache flushing when unsharing part of a file.
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEUzaAxoMeQq6m2jMV+H93GTRKtOsFAl+jWK8ACgkQ+H93GTRK
tOuiEQ/+IAEncpqUS1PTSWRlNX7MEQDvlnoLl9ZqhaYrW9pNyz8JzxejubkP/7RA
qkI/fgcBIhxOf+mTKguAUsu81we49PmlObWCEb5mfBI2aeoSL/yM4zikOHFNpy0o
f4U9++kpKJwrWG6kyvNwYMyT6r74vLW0EO9lhYjxAY6+5KgZL0SuFuRAaADDtWj8
SKIc/dli6qDS3IrnkibQtzFOOcmeOEn0qcWcS4gD7tbUpJlw0M2g88JjBPoT8oTK
wRBNrspbAA42YbYqlwmkBQZZwM+XZKLZNcvzzLgQLaQdTKEem2w2pB1j1KvJXsSo
ibxhmk1/tGFKtPTmbpm7dUC9ubr7xch6J+GHNwHuaWL2hxBWJzNRVokG1BsbDXcc
FW3ilwLFd8CFUXttQqQfhiUx8wfe2eJ1aXEBK5JeHWRwD+egLI9WXFJQzjUUwe+v
T+7r+0kS2TL3SXKU5TE+gsuuI5mcJpYvcWVqYPwBxjZW0tIhUzBldpfBYysG3ZAm
uhYcw3BHw1ucsjqcSe14CWqA4KnwgfAcKva5AJSLjJBu3wOi1wrFKg/+Wtpo0xA2
yFAqFP5FGW13oqeYtqJy0J79qOw6Po9wl+XnekSiBCEif965KtV+RBMP2/TBG+Pl
R+bNvSXlb1QiDNSjiIG2b34RiDNoiV2k+ELxOz3SSbzIxx8gvm4=
=MqoT
-----END PGP SIGNATURE-----

Merge tag 'xfs-5.10-fixes-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:

 - Fix an uninitialized struct problem

 - Fix an iomap problem zeroing unwritten EOF blocks

 - Fix some clumsy error handling when writeback fails on filesystems
   with blocksize < pagesize

 - Fix a retry loop not resetting loop variables properly

 - Fix scrub flagging rtinherit inodes on a non-rt fs, since the kernel
   actually does permit that combination

 - Fix excessive page cache flushing when unsharing part of a file

* tag 'xfs-5.10-fixes-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: only flush the unshared range in xfs_reflink_unshare
  xfs: fix scrub flagging rtinherit even if there is no rt device
  xfs: fix missing CoW blocks writeback conversion retry
  iomap: clean up writeback state logic on writepage error
  iomap: support partial page discard on writeback block mapping failure
  xfs: flush new eof page on truncate to avoid post-eof corruption
  xfs: set xefi_discard when creating a deferred agfl free log intent item
commit 9dbc1c03ee (8 changed files with 38 additions and 33 deletions)
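For context on the last bullet: "unsharing part of a file" is the path userspace reaches through fallocate(2) with FALLOC_FL_UNSHARE_RANGE on a reflinked file, which is what lands in xfs_reflink_unshare() in the diff below. A minimal sketch of triggering it follows; the file path and offsets are made up for illustration.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed to be a reflinked copy (e.g. created with cp --reflink). */
	int fd = open("/mnt/scratch/clone.img", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Unshare 1 MiB starting at offset 4 MiB.  With the fix below, only
	 * this byte range is written back, not the whole file.
	 */
	if (fallocate(fd, FALLOC_FL_UNSHARE_RANGE, 4 << 20, 1 << 20) < 0)
		perror("fallocate(FALLOC_FL_UNSHARE_RANGE)");

	close(fd);
	return 0;
}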
fs/iomap/buffered-io.c

@@ -1374,6 +1374,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
 	WARN_ON_ONCE(!PageLocked(page));
 	WARN_ON_ONCE(PageWriteback(page));
+	WARN_ON_ONCE(PageDirty(page));
 
 	/*
 	 * We cannot cancel the ioend directly here on error. We may have
@@ -1382,33 +1383,22 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * appropriately.
 	 */
 	if (unlikely(error)) {
+		/*
+		 * Let the filesystem know what portion of the current page
+		 * failed to map. If the page wasn't been added to ioend, it
+		 * won't be affected by I/O completion and we must unlock it
+		 * now.
+		 */
+		if (wpc->ops->discard_page)
+			wpc->ops->discard_page(page, file_offset);
 		if (!count) {
-			/*
-			 * If the current page hasn't been added to ioend, it
-			 * won't be affected by I/O completions and we must
-			 * discard and unlock it right here.
-			 */
-			if (wpc->ops->discard_page)
-				wpc->ops->discard_page(page);
 			ClearPageUptodate(page);
 			unlock_page(page);
 			goto done;
 		}
-
-		/*
-		 * If the page was not fully cleaned, we need to ensure that the
-		 * higher layers come back to it correctly. That means we need
-		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
-		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
-		 * so another attempt to write this page in this writeback sweep
-		 * will be made.
-		 */
-		set_page_writeback_keepwrite(page);
-	} else {
-		clear_page_dirty_for_io(page);
-		set_page_writeback(page);
 	}
 
+	set_page_writeback(page);
 	unlock_page(page);
 
 	/*
fs/xfs/libxfs/xfs_alloc.c

@@ -2467,6 +2467,7 @@ xfs_defer_agfl_block(
 	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
 	new->xefi_blockcount = 1;
 	new->xefi_oinfo = *oinfo;
+	new->xefi_skip_discard = false;
 
 	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
 
fs/xfs/libxfs/xfs_bmap.h

@@ -52,9 +52,9 @@ struct xfs_extent_free_item
 {
 	xfs_fsblock_t		xefi_startblock;/* starting fs block number */
 	xfs_extlen_t		xefi_blockcount;/* number of blocks in extent */
+	bool			xefi_skip_discard;
 	struct list_head	xefi_list;
 	struct xfs_owner_info	xefi_oinfo;	/* extent owner */
-	bool			xefi_skip_discard;
 };
 
 #define XFS_BMAP_MAX_NMAP	4
fs/xfs/scrub/inode.c

@@ -121,8 +121,7 @@ xchk_inode_flags(
 		goto bad;
 
 	/* rt flags require rt device */
-	if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) &&
-	    !mp->m_rtdev_targp)
+	if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
 		goto bad;
 
 	/* new rt bitmap flag only valid for rbmino */
fs/xfs/xfs_aops.c

@@ -346,8 +346,8 @@ xfs_map_blocks(
 	ssize_t			count = i_blocksize(inode);
 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
-	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
-	int			whichfork = XFS_DATA_FORK;
+	xfs_fileoff_t		cow_fsb;
+	int			whichfork;
 	struct xfs_bmbt_irec	imap;
 	struct xfs_iext_cursor	icur;
 	int			retries = 0;
@@ -381,6 +381,8 @@ xfs_map_blocks(
 	 * landed in a hole and we skip the block.
 	 */
 retry:
+	cow_fsb = NULLFILEOFF;
+	whichfork = XFS_DATA_FORK;
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
|
||||||
*/
|
*/
|
||||||
static void
|
static void
|
||||||
xfs_discard_page(
|
xfs_discard_page(
|
||||||
struct page *page)
|
struct page *page,
|
||||||
|
loff_t fileoff)
|
||||||
{
|
{
|
||||||
struct inode *inode = page->mapping->host;
|
struct inode *inode = page->mapping->host;
|
||||||
struct xfs_inode *ip = XFS_I(inode);
|
struct xfs_inode *ip = XFS_I(inode);
|
||||||
struct xfs_mount *mp = ip->i_mount;
|
struct xfs_mount *mp = ip->i_mount;
|
||||||
loff_t offset = page_offset(page);
|
unsigned int pageoff = offset_in_page(fileoff);
|
||||||
xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, offset);
|
xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, fileoff);
|
||||||
|
xfs_fileoff_t pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
|
||||||
int error;
|
int error;
|
||||||
|
|
||||||
if (XFS_FORCED_SHUTDOWN(mp))
|
if (XFS_FORCED_SHUTDOWN(mp))
|
||||||
|
@ -541,14 +545,14 @@ xfs_discard_page(
|
||||||
|
|
||||||
xfs_alert_ratelimited(mp,
|
xfs_alert_ratelimited(mp,
|
||||||
"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
|
"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
|
||||||
page, ip->i_ino, offset);
|
page, ip->i_ino, fileoff);
|
||||||
|
|
||||||
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
|
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
|
||||||
i_blocks_per_page(inode, page));
|
i_blocks_per_page(inode, page) - pageoff_fsb);
|
||||||
if (error && !XFS_FORCED_SHUTDOWN(mp))
|
if (error && !XFS_FORCED_SHUTDOWN(mp))
|
||||||
xfs_alert(mp, "page discard unable to remove delalloc mapping.");
|
xfs_alert(mp, "page discard unable to remove delalloc mapping.");
|
||||||
out_invalidate:
|
out_invalidate:
|
||||||
iomap_invalidatepage(page, 0, PAGE_SIZE);
|
iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct iomap_writeback_ops xfs_writeback_ops = {
|
static const struct iomap_writeback_ops xfs_writeback_ops = {
|
||||||
|
|
|
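To make the new partial-discard arithmetic in xfs_discard_page() concrete, here is a worked example under an assumed geometry of 4k pages and 1k blocks; the numbers are illustrative, not taken from the commit.

/*
 * Assumed geometry: PAGE_SIZE = 4096, blocksize = 1024, so
 * i_blocks_per_page(inode, page) == 4.
 *
 * Suppose block mapping fails at file offset 0x2c00, inside the page
 * covering file offsets 0x2000..0x2fff:
 *
 *   fileoff     = 0x2c00
 *   pageoff     = offset_in_page(fileoff)   = 0xc00
 *   start_fsb   = XFS_B_TO_FSBT(mp, 0x2c00) = file block 11
 *   pageoff_fsb = XFS_B_TO_FSBT(mp, 0xc00)  = 3
 *
 * xfs_bmap_punch_delalloc_range() is asked for 4 - 3 = 1 block (just the
 * failed tail), and iomap_invalidatepage() invalidates bytes 0xc00..0xfff
 * of the page (PAGE_SIZE - pageoff = 0x400).  File blocks 8..10, which were
 * already added to the ioend before the failure, are left untouched,
 * whereas the old code punched and invalidated the whole page.
 */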
fs/xfs/xfs_iops.c

@@ -911,6 +911,16 @@ xfs_setattr_size(
 		error = iomap_zero_range(inode, oldsize, newsize - oldsize,
 				&did_zeroing, &xfs_buffered_write_iomap_ops);
 	} else {
+		/*
+		 * iomap won't detect a dirty page over an unwritten block (or a
+		 * cow block over a hole) and subsequently skips zeroing the
+		 * newly post-EOF portion of the page. Flush the new EOF to
+		 * convert the block before the pagecache truncate.
+		 */
+		error = filemap_write_and_wait_range(inode->i_mapping, newsize,
+				newsize);
+		if (error)
+			return error;
 		error = iomap_truncate_page(inode, newsize, &did_zeroing,
 				&xfs_buffered_write_iomap_ops);
 	}
fs/xfs/xfs_reflink.c

@@ -1502,7 +1502,8 @@ xfs_reflink_unshare(
 			&xfs_buffered_write_iomap_ops);
 	if (error)
 		goto out;
-	error = filemap_write_and_wait(inode->i_mapping);
+	error = filemap_write_and_wait_range(inode->i_mapping, offset,
+			offset + len - 1);
 	if (error)
 		goto out;
 
include/linux/iomap.h

@@ -221,7 +221,7 @@ struct iomap_writeback_ops {
 	 * Optional, allows the file system to discard state on a page where
 	 * we failed to submit any I/O.
 	 */
-	void (*discard_page)(struct page *page);
+	void (*discard_page)(struct page *page, loff_t fileoff);
 };
 
 struct iomap_writepage_ctx {