linux/fs/xfs/xfs_file.c
Linus Torvalds beace86e61 Summary of significant series in this pull request:
- The 4 patch series "mm: ksm: prevent KSM from breaking merging of new
   VMAs" from Lorenzo Stoakes addresses an issue with KSM's
   PR_SET_MEMORY_MERGE mode: newly mapped VMAs were not eligible for
   merging with existing adjacent VMAs.
 
 - The 4 patch series "mm/damon: introduce DAMON_STAT for simple and
   practical access monitoring" from SeongJae Park adds a new kernel module
   which simplifies the setup and usage of DAMON in production
   environments.
 
 - The 6 patch series "stop passing a writeback_control to swap/shmem
   writeout" from Christoph Hellwig is a cleanup to the writeback code
   which removes a couple of pointers from struct writeback_control.
 
 - The 7 patch series "drivers/base/node.c: optimization and cleanups"
   from Donet Tom contains largely uncorrelated cleanups to the NUMA node
   setup and management code.
 
 - The 4 patch series "mm: userfaultfd: assorted fixes and cleanups" from
   Tal Zussman does some maintenance work on the userfaultfd code.
 
 - The 5 patch series "Readahead tweaks for larger folios" from Ryan
   Roberts implements some tuneups for pagecache readahead when it is
   reading into order>0 folios.
 
 - The 4 patch series "selftests/mm: Tweaks to the cow test" from Mark
   Brown provides some cleanups and consistency improvements to the
   selftests code.
 
 - The 4 patch series "Optimize mremap() for large folios" from Dev Jain
   does that.  A 37% reduction in execution time was measured in a
   memset+mremap+munmap microbenchmark.
 
 - The 5 patch series "Remove zero_user()" from Matthew Wilcox expunges
   zero_user() in favor of the more modern memzero_page().
 
 - The 3 patch series "mm/huge_memory: vmf_insert_folio_*() and
   vmf_insert_pfn_pud() fixes" from David Hildenbrand addresses some warts
   which David noticed in the huge page code.  These were not known to be
   causing any issues at this time.
 
 - The 3 patch series "mm/damon: use alloc_migrate_target() for
   DAMOS_MIGRATE_{HOT,COLD" from SeongJae Park provides some cleanup and
   consolidation work in DAMON.
 
 - The 3 patch series "use vm_flags_t consistently" from Lorenzo Stoakes
   uses vm_flags_t in places where we were inappropriately using other
   types.
 
 - The 3 patch series "mm/memfd: Reserve hugetlb folios before
   allocation" from Vivek Kasireddy increases the reliability of large page
   allocation in the memfd code.
 
 - The 14 patch series "mm: Remove pXX_devmap page table bit and pfn_t
   type" from Alistair Popple removes several now-unneeded PFN_* flags.
 
 - The 5 patch series "mm/damon: decouple sysfs from core" from SeongJae
   Park implements some cleanup and maintainability work in the DAMON
   sysfs layer.
 
 - The 5 patch series "madvise cleanup" from Lorenzo Stoakes does quite a
   lot of cleanup/maintenance work in the madvise() code.
 
 - The 4 patch series "madvise anon_name cleanups" from Vlastimil Babka
   provides additional cleanups on top of Lorenzo's effort.
 
 - The 11 patch series "Implement numa node notifier" from Oscar Salvador
   creates a standalone notifier for NUMA node memory state changes.
   Previously these were lumped under the more general memory on/offline
   notifier.
 
 - The 6 patch series "Make MIGRATE_ISOLATE a standalone bit" from Zi Yan
   cleans up the pageblock isolation code and fixes a potential issue which
   doesn't seem to cause any problems in practice.
 
 - The 5 patch series "selftests/damon: add python and drgn based DAMON
   sysfs functionality tests" from SeongJae Park adds additional drgn- and
   python-based DAMON selftests which are more comprehensive than the
   existing selftest suite.
 
 - The 5 patch series "Misc rework on hugetlb faulting path" from Oscar
   Salvador fixes a rather obscure deadlock in the hugetlb fault code and
   follows that fix with a series of cleanups.
 
 - The 3 patch series "cma: factor out allocation logic from
   __cma_declare_contiguous_nid" from Mike Rapoport rationalizes and cleans
   up the highmem-specific code in the CMA allocator.
 
 - The 28 patch series "mm/migration: rework movable_ops page migration
   (part 1)" from David Hildenbrand provides cleanups and
   future-preparedness to the migration code.
 
 - The 2 patch series "mm/damon: add trace events for auto-tuned
   monitoring intervals and DAMOS quota" from SeongJae Park adds some
   tracepoints to some DAMON auto-tuning code.
 
 - The 6 patch series "mm/damon: fix misc bugs in DAMON modules" from
   SeongJae Park does that.
 
 - The 6 patch series "mm/damon: misc cleanups" from SeongJae Park also
   does what it claims.
 
 - The 4 patch series "mm: folio_pte_batch() improvements" from David
   Hildenbrand cleans up the large folio PTE batching code.
 
 - The 13 patch series "mm/damon/vaddr: Allow interleaving in
   migrate_{hot,cold} actions" from SeongJae Park facilitates dynamic
   alteration of DAMON's inter-node allocation policy.
 
 - The 3 patch series "Remove unmap_and_put_page()" from Vishal Moola
   provides a couple of page->folio conversions.
 
 - The 4 patch series "mm: per-node proactive reclaim" from Davidlohr
   Bueso implements a per-node control of proactive reclaim - beyond the
   current memcg-based implementation.
 
 - The 14 patch series "mm/damon: remove damon_callback" from SeongJae
   Park replaces the damon_callback interface with a more general and
   powerful damon_call()+damos_walk() interface.
 
 - The 10 patch series "mm/mremap: permit mremap() move of multiple VMAs"
   from Lorenzo Stoakes implements a number of mremap cleanups (of course)
   in preparation for adding new mremap() functionality: newly permit the
   remapping of multiple VMAs when the user is specifying MREMAP_FIXED.  It
   still excludes some specialized situations where this cannot be
   performed reliably.
 
 - The 3 patch series "drop hugetlb_free_pgd_range()" from Anthony Yznaga
   switches some sparc hugetlb code over to the generic version and removes
   the thus-unneeded hugetlb_free_pgd_range().
 
 - The 4 patch series "mm/damon/sysfs: support periodic and automated
   stats update" from SeongJae Park augments the present
   userspace-requested update of DAMON sysfs monitoring files.  Automatic
   update is now provided, along with a tunable to control the update
   interval.
 
 - The 4 patch series "Some randome fixes and cleanups to swapfile" from
   Kemeng Shi does what it claims.
 
 - The 4 patch series "mm: introduce snapshot_page" from Luiz Capitulino
   and David Hildenbrand provides (and uses) a means by which debug-style
   functions can grab a copy of a pageframe and inspect it locklessly
   without tripping over the races inherent in operating on the live
   pageframe directly.
 
 - The 6 patch series "use per-vma locks for /proc/pid/maps reads" from
   Suren Baghdasaryan addresses the large contention issues which can be
   triggered by reads from that procfs file.  Latencies are reduced by more
   than half in some situations.  The series also introduces several new
   selftests for the /proc/pid/maps interface.
 
 - The 6 patch series "__folio_split() clean up" from Zi Yan cleans up
   __folio_split()!
 
 - The 7 patch series "Optimize mprotect() for large folios" from Dev
   Jain provides some quite large (>3x) speedups to mprotect() when dealing
   with large folios.
 
 - The 2 patch series "selftests/mm: reuse FORCE_READ to replace "asm
   volatile("" : "+r" (XXX));" and some cleanup" from wang lian does some
   cleanup work in the selftests code.
 
 - The 3 patch series "tools/testing: expand mremap testing" from Lorenzo
   Stoakes extends the mremap() selftest in several ways, including adding
   more checking of Lorenzo's recently added "permit mremap() move of
   multiple VMAs" feature.
 
 - The 22 patch series "selftests/damon/sysfs.py: test all parameters"
   from SeongJae Park extends the DAMON sysfs interface selftest so that it
   tests all possible user-requested parameters.  Rather than the present
   minimal subset.
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaIqcCgAKCRDdBJ7gKXxA
 jkVBAQCCn9DR1QP0CRk961ot0cKzOgioSc0aA03DPb2KXRt2kQEAzDAz0ARurFhL
 8BzbvI0c+4tntHLXvIlrC33n9KWAOQM=
 =XsFy
 -----END PGP SIGNATURE-----

Merge tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM updates from Andrew Morton:
 "As usual, many cleanups. The below blurbiage describes 42 patchsets.
  21 of those are partially or fully cleanup work. "cleans up",
  "cleanup", "maintainability", "rationalizes", etc.

  I never knew the MM code was so dirty.

  "mm: ksm: prevent KSM from breaking merging of new VMAs" (Lorenzo Stoakes)
     addresses an issue with KSM's PR_SET_MEMORY_MERGE mode: newly
     mapped VMAs were not eligible for merging with existing adjacent
     VMAs.

  "mm/damon: introduce DAMON_STAT for simple and practical access monitoring" (SeongJae Park)
     adds a new kernel module which simplifies the setup and usage of
     DAMON in production environments.

  "stop passing a writeback_control to swap/shmem writeout" (Christoph Hellwig)
     is a cleanup to the writeback code which removes a couple of
     pointers from struct writeback_control.

  "drivers/base/node.c: optimization and cleanups" (Donet Tom)
     contains largely uncorrelated cleanups to the NUMA node setup and
     management code.

  "mm: userfaultfd: assorted fixes and cleanups" (Tal Zussman)
     does some maintenance work on the userfaultfd code.

  "Readahead tweaks for larger folios" (Ryan Roberts)
     implements some tuneups for pagecache readahead when it is reading
     into order>0 folios.

  "selftests/mm: Tweaks to the cow test" (Mark Brown)
     provides some cleanups and consistency improvements to the
     selftests code.

  "Optimize mremap() for large folios" (Dev Jain)
     does that. A 37% reduction in execution time was measured in a
     memset+mremap+munmap microbenchmark.

  "Remove zero_user()" (Matthew Wilcox)
     expunges zero_user() in favor of the more modern memzero_page().

  "mm/huge_memory: vmf_insert_folio_*() and vmf_insert_pfn_pud() fixes" (David Hildenbrand)
     addresses some warts which David noticed in the huge page code.
     These were not known to be causing any issues at this time.

  "mm/damon: use alloc_migrate_target() for DAMOS_MIGRATE_{HOT,COLD" (SeongJae Park)
     provides some cleanup and consolidation work in DAMON.

  "use vm_flags_t consistently" (Lorenzo Stoakes)
     uses vm_flags_t in places where we were inappropriately using other
     types.

  "mm/memfd: Reserve hugetlb folios before allocation" (Vivek Kasireddy)
     increases the reliability of large page allocation in the memfd
     code.

  "mm: Remove pXX_devmap page table bit and pfn_t type" (Alistair Popple)
     removes several now-unneeded PFN_* flags.

  "mm/damon: decouple sysfs from core" (SeongJae Park)
     implements some cleanup and maintainability work in the DAMON
     sysfs layer.

  "madvise cleanup" (Lorenzo Stoakes)
     does quite a lot of cleanup/maintenance work in the madvise() code.

  "madvise anon_name cleanups" (Vlastimil Babka)
     provides additional cleanups on top of Lorenzo's effort.

  "Implement numa node notifier" (Oscar Salvador)
     creates a standalone notifier for NUMA node memory state changes.
     Previously these were lumped under the more general memory
     on/offline notifier.

  "Make MIGRATE_ISOLATE a standalone bit" (Zi Yan)
     cleans up the pageblock isolation code and fixes a potential issue
     which doesn't seem to cause any problems in practice.

  "selftests/damon: add python and drgn based DAMON sysfs functionality tests" (SeongJae Park)
     adds additional drgn- and python-based DAMON selftests which are
     more comprehensive than the existing selftest suite.

  "Misc rework on hugetlb faulting path" (Oscar Salvador)
     fixes a rather obscure deadlock in the hugetlb fault code and
     follows that fix with a series of cleanups.

  "cma: factor out allocation logic from __cma_declare_contiguous_nid" (Mike Rapoport)
     rationalizes and cleans up the highmem-specific code in the CMA
     allocator.

  "mm/migration: rework movable_ops page migration (part 1)" (David Hildenbrand)
     provides cleanups and future-preparedness to the migration code.

  "mm/damon: add trace events for auto-tuned monitoring intervals and DAMOS quota" (SeongJae Park)
     adds some tracepoints to some DAMON auto-tuning code.

  "mm/damon: fix misc bugs in DAMON modules" (SeongJae Park)
     does that.

  "mm/damon: misc cleanups" (SeongJae Park)
     also does what it claims.

  "mm: folio_pte_batch() improvements" (David Hildenbrand)
     cleans up the large folio PTE batching code.

  "mm/damon/vaddr: Allow interleaving in migrate_{hot,cold} actions" (SeongJae Park)
     facilitates dynamic alteration of DAMON's inter-node allocation
     policy.

  "Remove unmap_and_put_page()" (Vishal Moola)
     provides a couple of page->folio conversions.

  "mm: per-node proactive reclaim" (Davidlohr Bueso)
     implements a per-node control of proactive reclaim - beyond the
     current memcg-based implementation.

  "mm/damon: remove damon_callback" (SeongJae Park)
     replaces the damon_callback interface with a more general and
     powerful damon_call()+damos_walk() interface.

  "mm/mremap: permit mremap() move of multiple VMAs" (Lorenzo Stoakes)
     implements a number of mremap cleanups (of course) in preparation
     for adding new mremap() functionality: newly permit the remapping
     of multiple VMAs when the user is specifying MREMAP_FIXED. It still
     excludes some specialized situations where this cannot be performed
     reliably.

  "drop hugetlb_free_pgd_range()" (Anthony Yznaga)
     switches some sparc hugetlb code over to the generic version and
     removes the thus-unneeded hugetlb_free_pgd_range().

  "mm/damon/sysfs: support periodic and automated stats update" (SeongJae Park)
     augments the present userspace-requested update of DAMON sysfs
     monitoring files. Automatic update is now provided, along with a
     tunable to control the update interval.

  "Some randome fixes and cleanups to swapfile" (Kemeng Shi)
     does what it claims.

  "mm: introduce snapshot_page" (Luiz Capitulino and David Hildenbrand)
     provides (and uses) a means by which debug-style functions can grab
     a copy of a pageframe and inspect it locklessly without tripping
     over the races inherent in operating on the live pageframe
     directly.

  "use per-vma locks for /proc/pid/maps reads" (Suren Baghdasaryan)
     addresses the large contention issues which can be triggered by
     reads from that procfs file. Latencies are reduced by more than
     half in some situations. The series also introduces several new
     selftests for the /proc/pid/maps interface.

  "__folio_split() clean up" (Zi Yan)
     cleans up __folio_split()!

  "Optimize mprotect() for large folios" (Dev Jain)
     provides some quite large (>3x) speedups to mprotect() when dealing
     with large folios.

  "selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));" and some cleanup" (wang lian)
     does some cleanup work in the selftests code.

  "tools/testing: expand mremap testing" (Lorenzo Stoakes)
     extends the mremap() selftest in several ways, including adding
     more checking of Lorenzo's recently added "permit mremap() move of
     multiple VMAs" feature.

  "selftests/damon/sysfs.py: test all parameters" (SeongJae Park)
     extends the DAMON sysfs interface selftest so that it tests all
     possible user-requested parameters. Rather than the present minimal
     subset"

* tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (370 commits)
  MAINTAINERS: add missing headers to mempory policy & migration section
  MAINTAINERS: add missing file to cgroup section
  MAINTAINERS: add MM MISC section, add missing files to MISC and CORE
  MAINTAINERS: add missing zsmalloc file
  MAINTAINERS: add missing files to page alloc section
  MAINTAINERS: add missing shrinker files
  MAINTAINERS: move memremap.[ch] to hotplug section
  MAINTAINERS: add missing mm_slot.h file THP section
  MAINTAINERS: add missing interval_tree.c to memory mapping section
  MAINTAINERS: add missing percpu-internal.h file to per-cpu section
  mm/page_alloc: remove trace_mm_alloc_contig_migrate_range_info()
  selftests/damon: introduce _common.sh to host shared function
  selftests/damon/sysfs.py: test runtime reduction of DAMON parameters
  selftests/damon/sysfs.py: test non-default parameters runtime commit
  selftests/damon/sysfs.py: generalize DAMON context commit assertion
  selftests/damon/sysfs.py: generalize monitoring attributes commit assertion
  selftests/damon/sysfs.py: generalize DAMOS schemes commit assertion
  selftests/damon/sysfs.py: test DAMOS filters commitment
  selftests/damon/sysfs.py: generalize DAMOS scheme commit assertion
  selftests/damon/sysfs.py: test DAMOS destinations commitment
  ...
2025-07-31 14:57:54 -07:00

1975 lines
51 KiB
C

// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_file.h"
#include "xfs_aops.h"
#include "xfs_zone_alloc.h"
#include <linux/dax.h>
#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>
static const struct vm_operations_struct xfs_file_vm_ops;
/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	unsigned int		alloc_unit = xfs_inode_alloc_unitsize(ip);

	/* Power-of-two allocation units allow a single cheap mask test. */
	if (is_power_of_2(alloc_unit))
		return !((pos | len) & (alloc_unit - 1));

	/* Otherwise check the offset and the length separately. */
	return isaligned_64(pos, alloc_unit) &&
	       isaligned_64(len, alloc_unit);
}
/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,		/* unused: nothing to write back */
	loff_t			end,		/* unused: nothing to write back */
	int			datasync)	/* unused: all updates are logged */
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);

	/* Forcing the log up to the inode's last change covers everything. */
	return xfs_log_force_inode(ip);
}
/*
 * Work out which log commit sequence, if any, an fsync of this inode has
 * to force.  Returns 0 when no log force is required.
 */
static xfs_csn_t
xfs_fsync_seq(
	struct xfs_inode	*ip,
	bool			datasync)
{
	/* An unpinned inode has nothing sitting in the log to force. */
	if (xfs_ipincount(ip) == 0)
		return 0;

	/*
	 * A full fsync always forces; fdatasync only needs to when fields
	 * other than the timestamps are dirty in the log.
	 */
	if (!datasync || (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return ip->i_itemp->ili_commit_seq;

	return 0;
}
/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning. If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk. We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_csn_t		seq;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	/* A zero sequence means there is nothing to force for this sync. */
	seq = xfs_fsync_seq(ip, datasync);
	if (seq) {
		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
				log_flushed);

		/*
		 * Clear ili_fsync_fields under ili_lock so this cannot race
		 * with the logging code setting new fields (see the comment
		 * above for why this only happens after the log force).
		 */
		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}
/*
 * ->fsync for regular files: write back and wait on dirty data, flush the
 * relevant device write caches, and force the log for any pinned metadata.
 * Returns 0 or the first negative errno encountered.
 */
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error, err2;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	/* Write back the range and wait for the data I/O to complete. */
	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache the device used for file data first. This is to
	 * ensure newly written file data make it to disk before logging the new
	 * inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip) && mp->m_rtdev_targp != mp->m_ddev_targp)
		error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	/*
	 * Any inode that has dirty modifications in the log is pinned. The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require to sync previously completed I/O.
	 */
	if (xfs_ipincount(ip)) {
		err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
		if (err2 && !error)
			error = err2;
	}

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp) {
		err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
		if (err2 && !error)
			error = err2;
	}

	return error;
}
/*
 * Take the requested inode lock for an iocb, honouring IOCB_NOWAIT by
 * using the non-blocking trylock variant.
 *
 * Returns 0 on success, or -EAGAIN when IOCB_NOWAIT is set and the lock
 * could not be acquired without blocking.
 */
static int
xfs_ilock_iocb(
	struct kiocb		*iocb,
	unsigned int		lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (!(iocb->ki_flags & IOCB_NOWAIT)) {
		xfs_ilock(ip, lock_mode);
		return 0;
	}

	if (!xfs_ilock_nowait(ip, lock_mode))
		return -EAGAIN;
	return 0;
}
/*
 * Lock the inode for a write iocb, initially taking *@lock_mode.
 *
 * If a reflink remap is in progress while only the shared iolock was
 * requested, upgrade to the exclusive iolock so the write waits for the
 * remap to finish; *@lock_mode is updated so the caller unlocks the mode
 * actually held.
 *
 * Returns 0 on success or a negative errno (-EAGAIN for a contended
 * IOCB_NOWAIT attempt).  Fix: the local return variable was declared
 * ssize_t although both this function and xfs_ilock_iocb() traffic in
 * plain int error codes; use int for consistency.
 */
static int
xfs_ilock_iocb_for_write(
	struct kiocb		*iocb,
	unsigned int		*lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	int			ret;

	ret = xfs_ilock_iocb(iocb, *lock_mode);
	if (ret)
		return ret;

	/*
	 * If a reflink remap is in progress we always need to take the iolock
	 * exclusively to wait for it to finish.
	 */
	if (*lock_mode == XFS_IOLOCK_SHARED &&
	    xfs_iflags_test(ip, XFS_IREMAPPING)) {
		xfs_iunlock(ip, *lock_mode);
		*lock_mode = XFS_IOLOCK_EXCL;
		return xfs_ilock_iocb(iocb, *lock_mode);
	}

	return 0;
}
/*
 * Direct I/O read path: runs under the shared iolock via iomap_dio_rw().
 */
STATIC ssize_t
xfs_file_dio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			error;

	trace_xfs_file_direct_read(iocb, to);

	/* Nothing to transfer; also skip the atime update. */
	if (!iov_iter_count(to))
		return 0;

	file_accessed(iocb->ki_filp);

	error = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (error)
		return error;
	error = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return error;
}
/*
 * DAX read path.  Note that unlike the direct I/O path, the atime update
 * here happens after the read has completed and the iolock was dropped.
 */
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	ssize_t			error;

	trace_xfs_file_dax_read(iocb, to);

	/* Nothing to transfer; also skip the atime update. */
	if (!iov_iter_count(to))
		return 0;

	error = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (error)
		return error;
	error = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return error;
}
/*
 * Buffered read path: take the shared iolock around the generic pagecache
 * read to serialise against truncate and direct writers.
 */
STATIC ssize_t
xfs_file_buffered_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			error;

	trace_xfs_file_buffered_read(iocb, to);

	error = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (!error) {
		error = generic_file_read_iter(iocb, to);
		xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	}
	return error;
}
/*
 * ->read_iter: account the call, bail out on a shut-down filesystem, and
 * dispatch to the DAX, direct, or buffered read implementation.
 */
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			done;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* DAX takes precedence over O_DIRECT, which beats buffered I/O. */
	if (IS_DAX(inode))
		done = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		done = xfs_file_dio_read(iocb, to);
	else
		done = xfs_file_buffered_read(iocb, to);

	if (done > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, done);
	return done;
}
/*
 * ->splice_read: like the buffered read path, but feeding a pipe from the
 * pagecache under the shared iolock.
 */
STATIC ssize_t
xfs_file_splice_read(
	struct file		*in,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			len,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(in);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			done;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	trace_xfs_file_splice_read(ip, *ppos, len);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	done = filemap_splice_read(in, ppos, pipe, len, flags);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (done > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, done);
	return done;
}
/*
 * Take care of zeroing post-EOF blocks when they might exist.
 *
 * Returns 0 if successful, a negative error for a failure, or 1 if this
 * function dropped the iolock and reacquired it exclusively and the caller
 * needs to restart the write sanity checks.
 */
static ssize_t
xfs_file_write_zero_eof(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	unsigned int		*iolock,	/* in/out: may be upgraded to EXCL */
	size_t			count,		/* original iter count, for reexpand */
	bool			*drained_dio,	/* in/out: AIO drain already done? */
	struct xfs_zone_alloc_ctx *ac)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	loff_t			isize;
	int			error;

	/*
	 * We need to serialise against EOF updates that occur in IO completions
	 * here. We want to make sure that nobody is changing the size while
	 * we do this check until we have placed an IO barrier (i.e. hold
	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched. The
	 * spinlock effectively forms a memory barrier once we have
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
	 * hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(VFS_I(ip));
	if (iocb->ki_pos <= isize) {
		/* Write starts at or before EOF: no post-EOF zeroing needed. */
		spin_unlock(&ip->i_flags_lock);
		return 0;
	}
	spin_unlock(&ip->i_flags_lock);

	/* Zeroing may block (lock upgrade, dio drain): punt NOWAIT writers. */
	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	if (!*drained_dio) {
		/*
		 * If zeroing is needed and we are currently holding the iolock
		 * shared, we need to update it to exclusive which implies
		 * having to redo all checks before.
		 */
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_ilock(ip, *iolock);
			iov_iter_reexpand(from, count);
		}

		/*
		 * We now have an IO submission barrier in place, but AIO can do
		 * EOF updates during IO completion and hence we now need to
		 * wait for all of them to drain. Non-AIO DIO will have drained
		 * before we are given the XFS_IOLOCK_EXCL, and so for most
		 * cases this wait is a no-op.
		 */
		inode_dio_wait(VFS_I(ip));
		*drained_dio = true;

		/* Tell the caller to rerun its checks from scratch. */
		return 1;
	}

	trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);

	/* Zero from old EOF to the write's start, excluding mmap writers. */
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, ac, NULL);
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);

	return error;
}
/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared and exclusive according to
 * @iolock, and returns with it held. Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 *
 * Returns 0 when the write may proceed (or there is nothing to write) and
 * a negative errno on failure.  If the lock upgrade for the NOSEC case
 * fails, *@iolock is set to 0 so the caller knows no lock is held.
 */
STATIC ssize_t
xfs_file_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	unsigned int		*iolock,
	struct xfs_zone_alloc_ctx *ac)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	ssize_t			error;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	/* Break pNFS layouts; NOWAIT writers may not sleep doing so. */
	if (iocb->ki_flags & IOCB_NOWAIT) {
		error = break_layout(inode, false);
		if (error == -EWOULDBLOCK)
			error = -EAGAIN;
	} else {
		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	}

	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(XFS_I(inode), *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		error = xfs_ilock_iocb(iocb, *iolock);
		if (error) {
			/* Signal to the caller that no lock is held. */
			*iolock = 0;
			return error;
		}
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero all
	 * blocks that fall between the existing EOF and the start of this
	 * write.
	 *
	 * We can do an unlocked check for i_size here safely as I/O completion
	 * can only extend EOF. Truncate is locked out at this point, so the
	 * EOF can not move backwards, only forwards. Hence we only need to take
	 * the slow path when we are at or beyond the current EOF.
	 */
	if (iocb->ki_pos > i_size_read(inode)) {
		error = xfs_file_write_zero_eof(iocb, from, iolock, count,
				&drained_dio, ac);
		/* 1 means the iolock was cycled: redo all checks from scratch. */
		if (error == 1)
			goto restart;
		if (error)
			return error;
	}

	return kiocb_modified(iocb);
}
/*
 * Reserve zone space for an upcoming write before the iolock is taken.
 */
static ssize_t
xfs_zoned_write_space_reserve(
	struct xfs_mount	*mp,
	struct kiocb		*iocb,
	struct iov_iter		*from,
	unsigned int		flags,
	struct xfs_zone_alloc_ctx *ac)
{
	loff_t			count = iov_iter_count(from);
	int			error;

	if (iocb->ki_flags & IOCB_NOWAIT)
		flags |= XFS_ZR_NOWAIT;

	/*
	 * Clamp the byte count against the rlimit and LFS boundaries up front
	 * so that we do not grossly over-reserve.
	 *
	 * The generic write path repeats this check later under the iolock.
	 * Should the limit have grown by then we simply stick with the
	 * smaller amount reserved here; should it have shrunk, the smaller
	 * limit wins and the surplus reservation is returned once the write
	 * has finished.
	 */
	error = generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, &count);
	if (error)
		return error;

	/*
	 * Round the count up to filesystem blocks, sloppily, and pad the
	 * reservation: one extra block avoids having to look at the start
	 * offset (which is not stable for O_APPEND until the iolock is
	 * held), and one block each covers zeroing an unaligned old EOF
	 * block and an unaligned new start block.  Whatever remains unused
	 * is handed back after the write.
	 */
	return xfs_zoned_space_reserve(mp, XFS_B_TO_FSB(mp, count) + 1 + 2,
			flags, ac);
}
/*
 * Direct I/O write completion handler (iomap_dio_ops.end_io).
 *
 * @size is the amount written at @iocb->ki_pos; @flags carries the
 * IOMAP_DIO_* completion state.  Ends COW remaps, converts unwritten
 * extents, and extends the in-core/on-disk file size as needed.
 * Returns 0 or a negative errno.
 */
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	/* Zoned writes never use unwritten extents or the COW fork here. */
	ASSERT(!xfs_is_zoned_inode(ip) ||
	       !(flags & (IOMAP_DIO_UNWRITTEN | IOMAP_DIO_COW)));

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		if (iocb->ki_flags & IOCB_ATOMIC)
			error = xfs_reflink_end_atomic_cow(ip, offset, size);
		else
			error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 *
	 * As IO completion only ever extends EOF, we can do an unlocked check
	 * here to avoid taking the spinlock. If we land within the current EOF,
	 * then we do not need to do an extending update at all, and we don't
	 * need to take the lock to check this. If we race with an update moving
	 * EOF, then we'll either still be beyond EOF and need to take the lock,
	 * or we'll be within EOF and we don't need to take it at all.
	 */
	if (offset + size <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}
/* Completion handling for direct writes to conventional (non-zoned) devices. */
static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};
/*
 * Submission hook for direct writes to zoned devices: charge the bio's blocks
 * against the caller's zone space reservation, then hand the resulting ioend
 * to the zone allocator for placement and submission.
 */
static void
xfs_dio_zoned_submit_io(
	const struct iomap_iter	*iter,
	struct bio		*bio,
	loff_t			file_offset)
{
	struct xfs_mount	*mp = XFS_I(iter->inode)->i_mount;
	struct xfs_zone_alloc_ctx *ac = iter->private;
	xfs_filblks_t		count_fsb;
	struct iomap_ioend	*ioend;

	count_fsb = XFS_B_TO_FSB(mp, bio->bi_iter.bi_size);
	if (count_fsb > ac->reserved_blocks) {
		/*
		 * Trying to write more blocks than were reserved up front
		 * indicates corrupted in-core accounting; shut down rather
		 * than overcommit zone space.
		 */
		xfs_err(mp,
"allocation (%lld) larger than reservation (%lld).",
			count_fsb, ac->reserved_blocks);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		bio_io_error(bio);
		return;
	}
	/* Consume the reservation one bio at a time. */
	ac->reserved_blocks -= count_fsb;

	bio->bi_end_io = xfs_end_bio;
	ioend = iomap_init_ioend(iter->inode, bio, file_offset,
			IOMAP_IOEND_DIRECT);
	xfs_zone_alloc_and_submit(ioend, &ac->open_zone);
}
/*
 * Direct write ops for zoned devices: bios carry an ioend (hence the ioend
 * bioset) and are routed through the zone allocator at submission time.
 */
static const struct iomap_dio_ops xfs_dio_zoned_write_ops = {
	.bio_set	= &iomap_ioend_bioset,
	.submit_io	= xfs_dio_zoned_submit_io,
	.end_io		= xfs_dio_write_end_io,
};
/*
 * Handle block aligned direct I/O writes.
 *
 * @ac is the zoned allocation context, or NULL for non-zoned writes (see the
 * callers in xfs_file_dio_write() and xfs_file_dio_write_zoned()).
 */
static noinline ssize_t
xfs_file_dio_write_aligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from,
	const struct iomap_ops	*ops,
	const struct iomap_dio_ops *dops,
	struct xfs_zone_alloc_ctx *ac)
{
	unsigned int		iolock = XFS_IOLOCK_SHARED;
	ssize_t			ret;

	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock, ac);
	if (ret)
		goto out_unlock;

	/*
	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
	 * the iolock back to shared if we had to take the exclusive lock in
	 * xfs_file_write_checks() for other reasons.
	 */
	if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}
	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, ops, dops, 0, ac, 0);
out_unlock:
	xfs_iunlock(ip, iolock);
	return ret;
}
/*
 * Handle block aligned direct I/O writes to zoned devices.
 *
 * Zone space for the whole write is reserved up front; the submission hook
 * consumes it bio by bio and whatever remains is released afterwards.
 */
static noinline ssize_t
xfs_file_dio_write_zoned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct xfs_zone_alloc_ctx ac = { };
	ssize_t			ret;

	ret = xfs_zoned_write_space_reserve(ip->i_mount, iocb, from, 0, &ac);
	if (ret < 0)
		return ret;
	ret = xfs_file_dio_write_aligned(ip, iocb, from,
			&xfs_zoned_direct_write_iomap_ops,
			&xfs_dio_zoned_write_ops, &ac);
	xfs_zoned_space_unreserve(ip->i_mount, &ac);
	return ret;
}
/*
 * Handle block atomic writes
 *
 * Two methods of atomic writes are supported:
 * - REQ_ATOMIC-based, which would typically use some form of HW offload in the
 *   disk
 * - COW-based, which uses a COW fork as a staging extent for data updates
 *   before atomically updating extent mappings for the range being written
 *
 */
static noinline ssize_t
xfs_file_dio_write_atomic(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	unsigned int		iolock = XFS_IOLOCK_SHARED;
	ssize_t			ret, ocount = iov_iter_count(from);
	const struct iomap_ops	*dops;

	/*
	 * HW offload should be faster, so try that first if it is already
	 * known that the write length is not too large.
	 */
	if (ocount > xfs_inode_buftarg(ip)->bt_awu_max)
		dops = &xfs_atomic_write_cow_iomap_ops;
	else
		dops = &xfs_direct_write_iomap_ops;

retry:
	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
	if (ret)
		return ret;

	ret = xfs_file_write_checks(iocb, from, &iolock, NULL);
	if (ret)
		goto out_unlock;

	/* Demote similar to xfs_file_dio_write_aligned() */
	if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, dops, &xfs_dio_write_ops,
			0, NULL, 0);

	/*
	 * The retry mechanism is based on the ->iomap_begin method returning
	 * -ENOPROTOOPT, which would be when the REQ_ATOMIC-based write is not
	 * possible.  The REQ_ATOMIC-based method is typically not possible if
	 * the write spans multiple extents or the disk blocks are misaligned.
	 */
	if (ret == -ENOPROTOOPT && dops == &xfs_direct_write_iomap_ops) {
		/* Drop the lock before retrying with the COW-based method. */
		xfs_iunlock(ip, iolock);
		dops = &xfs_atomic_write_cow_iomap_ops;
		goto retry;
	}

out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}
/*
 * Handle block unaligned direct I/O writes
 *
 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 * them to be done in parallel with reads and other direct I/O writes.  However,
 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 * to do sub-block zeroing and that requires serialisation against other direct
 * I/O to the same block.  In this case we need to serialise the submission of
 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 * In the case where sub-block zeroing is not required, we can do concurrent
 * sub-block dios to the same block successfully.
 *
 * Optimistically submit the I/O using the shared lock first, but use the
 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 * if block allocation or partial block zeroing would be required.  In that case
 * we try again with the exclusive lock.
 */
static noinline ssize_t
xfs_file_dio_write_unaligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	size_t			isize = i_size_read(VFS_I(ip));
	size_t			count = iov_iter_count(from);
	unsigned int		iolock = XFS_IOLOCK_SHARED;
	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
	ssize_t			ret;

	/*
	 * Extending writes need exclusivity because of the sub-block zeroing
	 * that the DIO code always does for partial tail blocks beyond EOF, so
	 * don't even bother trying the fast path in this case.
	 */
	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
retry_exclusive:
		iolock = XFS_IOLOCK_EXCL;
		flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
	if (ret)
		return ret;

	/*
	 * We can't properly handle unaligned direct I/O to reflink files yet,
	 * as we can't unshare a partial block.
	 */
	if (xfs_is_cow_inode(ip)) {
		/* -ENOTBLK makes the caller fall back to buffered I/O. */
		trace_xfs_reflink_bounce_dio_write(iocb, from);
		ret = -ENOTBLK;
		goto out_unlock;
	}

	ret = xfs_file_write_checks(iocb, from, &iolock, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * If we are doing exclusive unaligned I/O, this must be the only I/O
	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
	 * conversions from the AIO end_io handler.  Wait for all other I/O to
	 * drain first.
	 */
	if (flags & IOMAP_DIO_FORCE_WAIT)
		inode_dio_wait(VFS_I(ip));

	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			&xfs_dio_write_ops, flags, NULL, 0);

	/*
	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
	 * layer rejected it for mapping or locking reasons.  If we are doing
	 * nonblocking user I/O, propagate the error.
	 */
	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
		xfs_iunlock(ip, iolock);
		goto retry_exclusive;
	}

out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}
/*
 * Direct write dispatcher: validates sector alignment, then routes to the
 * unaligned, zoned, atomic or plain aligned write implementation.
 */
static ssize_t
xfs_file_dio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	size_t			count = iov_iter_count(from);

	/* direct I/O must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * For always COW inodes we also must check the alignment of each
	 * individual iovec segment, as they could end up with different
	 * I/Os due to the way bio_iov_iter_get_pages works, and we'd
	 * then overwrite an already written block.
	 */
	if (((iocb->ki_pos | count) & ip->i_mount->m_blockmask) ||
	    (xfs_is_always_cow_inode(ip) &&
	     (iov_iter_alignment(from) & ip->i_mount->m_blockmask)))
		return xfs_file_dio_write_unaligned(ip, iocb, from);
	if (xfs_is_zoned_inode(ip))
		return xfs_file_dio_write_zoned(ip, iocb, from);
	if (iocb->ki_flags & IOCB_ATOMIC)
		return xfs_file_dio_write_atomic(ip, iocb, from);
	return xfs_file_dio_write_aligned(ip, iocb, from,
			&xfs_direct_write_iomap_ops, &xfs_dio_write_ops, NULL);
}
/*
 * Write path for DAX files.  Always takes the iolock exclusive, performs the
 * synchronous dax_iomap_rw(), and extends the on-disk file size afterwards if
 * the write went past the old EOF.
 */
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	unsigned int		iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	loff_t			pos;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock, NULL);
	if (ret)
		goto out;

	pos = iocb->ki_pos;

	trace_xfs_file_dax_write(iocb, from);
	ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		/* The write extended the file: update in-core then on-disk size. */
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
/*
 * Buffered write path for conventional (non-zoned) files.  On EDQUOT/ENOSPC
 * the write is retried once after trying to reclaim preallocated space.
 */
STATIC ssize_t
xfs_file_buffered_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	bool			cleared_space = false;
	unsigned int		iolock;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	ret = xfs_file_write_checks(iocb, from, &iolock, NULL);
	if (ret)
		goto out;

	trace_xfs_file_buffered_write(iocb, from);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
			NULL);

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error.  In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space.  This reduces the chances that the eofblocks scan
	 * waits on dirty mappings.  Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.  Use a synchronous scan to increase the
	 * effectiveness of the scan.
	 */
	if (ret == -EDQUOT && !cleared_space) {
		xfs_iunlock(ip, iolock);
		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
		cleared_space = true;
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_icwalk	icw = {0};

		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
		xfs_blockgc_free_space(ip->i_mount, &icw);
		goto write_retry;
	}

out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
/*
 * Buffered write path for zoned files.  Space is (greedily) reserved up
 * front, the iter is truncated to the reservation actually obtained, and the
 * write is retried once after flushing if delalloc conversion might release
 * over-reserved indirect blocks.
 */
STATIC ssize_t
xfs_file_buffered_write_zoned(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		iolock = XFS_IOLOCK_EXCL;
	bool			cleared_space = false;
	struct xfs_zone_alloc_ctx ac = { };
	ssize_t			ret;

	ret = xfs_zoned_write_space_reserve(mp, iocb, from, XFS_ZR_GREEDY, &ac);
	if (ret < 0)
		return ret;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		goto out_unreserve;

	ret = xfs_file_write_checks(iocb, from, &iolock, &ac);
	if (ret)
		goto out_unlock;

	/*
	 * Truncate the iter to the length that we were actually able to
	 * allocate blocks for.  This needs to happen after
	 * xfs_file_write_checks, because that assigns ki_pos for O_APPEND
	 * writes.
	 */
	iov_iter_truncate(from,
			XFS_FSB_TO_B(mp, ac.reserved_blocks) -
			(iocb->ki_pos & mp->m_blockmask));
	if (!iov_iter_count(from))
		goto out_unlock;

retry:
	trace_xfs_file_buffered_write(iocb, from);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
			&ac);
	if (ret == -ENOSPC && !cleared_space) {
		/*
		 * Kick off writeback to convert delalloc space and release the
		 * usually too pessimistic indirect block reservations.
		 */
		xfs_flush_inodes(mp);
		cleared_space = true;
		goto retry;
	}

out_unlock:
	xfs_iunlock(ip, iolock);
out_unreserve:
	xfs_zoned_space_unreserve(ip->i_mount, &ac);
	if (ret > 0) {
		XFS_STATS_ADD(mp, xs_write_bytes, ret);
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
/*
 * Top-level ->write_iter entry point: validates atomic write bounds and
 * dispatches to the DAX, direct or buffered write implementation.
 */
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_ATOMIC) {
		/* Atomic writes must fit within the advertised min/max units. */
		if (ocount < xfs_get_atomic_write_min(ip))
			return -EINVAL;

		if (ocount > xfs_get_atomic_write_max(ip))
			return -EINVAL;

		ret = generic_atomic_write_valid(iocb, from);
		if (ret)
			return ret;
	}

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	if (xfs_is_zoned_inode(ip))
		return xfs_file_buffered_write_zoned(iocb, from);
	return xfs_file_buffered_write(iocb, from);
}
/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
struct xfs_inode *ip = XFS_I(file_inode(filp));
if (xfs_has_wsync(ip->i_mount))
return true;
if (filp->f_flags & (__O_SYNC | O_DSYNC))
return true;
if (IS_SYNC(file_inode(filp)))
return true;
return false;
}
/*
 * Work out whether a fallocate operation needs to grow the file size, and
 * validate the new size against the VFS limits if so.  Returns 0 with
 * *new_size untouched when no size update is required.
 */
static int
xfs_falloc_newsize(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len,
	loff_t			*new_size)
{
	struct inode		*inode = file_inode(file);
	loff_t			end = offset + len;

	if (mode & FALLOC_FL_KEEP_SIZE)
		return 0;
	if (end <= i_size_read(inode))
		return 0;

	*new_size = end;
	return inode_newsize_ok(inode, end);
}
/*
 * Apply a deferred size update from a fallocate operation.  A @new_size of
 * zero means no update is pending and is a no-op.
 */
static int
xfs_falloc_setsize(
	struct file		*file,
	loff_t			new_size)
{
	struct iattr		iattr;

	if (!new_size)
		return 0;

	iattr = (struct iattr) {
		.ia_valid	= ATTR_SIZE,
		.ia_size	= new_size,
	};
	return xfs_vn_setattr_size(file_mnt_idmap(file), file_dentry(file),
			&iattr);
}
/*
 * FALLOC_FL_COLLAPSE_RANGE: remove the block-aligned range and shift the
 * following data down, then shrink the file size accordingly.
 */
static int
xfs_falloc_collapse_range(
	struct file		*file,
	loff_t			offset,
	loff_t			len,
	struct xfs_zone_alloc_ctx *ac)
{
	struct inode		*inode = file_inode(file);
	loff_t			new_size = i_size_read(inode) - len;
	int			error;

	if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
		return -EINVAL;

	/*
	 * There is no need to overlap collapse range with EOF, in which case it
	 * is effectively a truncate operation
	 */
	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	error = xfs_collapse_file_space(XFS_I(inode), offset, len, ac);
	if (error)
		return error;
	return xfs_falloc_setsize(file, new_size);
}
/*
 * FALLOC_FL_INSERT_RANGE: grow the file and shift existing data up to open a
 * block-aligned hole at @offset.
 */
static int
xfs_falloc_insert_range(
	struct file		*file,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	loff_t			isize = i_size_read(inode);
	int			error;

	if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
		return -EINVAL;

	/*
	 * New inode size must not exceed ->s_maxbytes, accounting for
	 * possible signed overflow.
	 */
	if (inode->i_sb->s_maxbytes - isize < len)
		return -EFBIG;

	/* Offset should be less than i_size */
	if (offset >= isize)
		return -EINVAL;

	/* Grow the file first so the shifted extents stay inside EOF. */
	error = xfs_falloc_setsize(file, isize + len);
	if (error)
		return error;

	/*
	 * Perform hole insertion now that the file size has been updated so
	 * that if we crash during the operation we don't leave shifted extents
	 * past EOF and hence losing access to the data that is contained within
	 * them.
	 */
	return xfs_insert_file_space(XFS_I(inode), offset, len);
}
/*
 * Punch a hole and prealloc the range.  We use a hole punch rather than
 * unwritten extent conversion for two reasons:
 *
 *   1.) Hole punch handles partial block zeroing for us.
 *   2.) If prealloc returns ENOSPC, the file range is still zero-valued by
 *       virtue of the hole punch.
 */
static int
xfs_falloc_zero_range(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len,
	struct xfs_zone_alloc_ctx *ac)
{
	struct inode		*inode = file_inode(file);
	unsigned int		blksize = i_blocksize(inode);
	loff_t			new_size = 0;
	int			error;

	trace_xfs_zero_file_space(XFS_I(inode));

	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
	if (error)
		return error;

	error = xfs_free_file_space(XFS_I(inode), offset, len, ac);
	if (error)
		return error;

	/* Round the range out to full blocks before preallocating it again. */
	len = round_up(offset + len, blksize) - round_down(offset, blksize);
	offset = round_down(offset, blksize);
	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
	if (error)
		return error;
	return xfs_falloc_setsize(file, new_size);
}
/*
 * FALLOC_FL_UNSHARE_RANGE: break reflink sharing for the range, then
 * preallocate it and apply any pending size extension.
 */
static int
xfs_falloc_unshare_range(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	loff_t			new_size = 0;
	int			error;

	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
	if (error)
		return error;

	error = xfs_reflink_unshare(XFS_I(inode), offset, len);
	if (error)
		return error;

	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
	if (error)
		return error;
	return xfs_falloc_setsize(file, new_size);
}
/*
 * FALLOC_FL_ALLOCATE_RANGE (plain fallocate): preallocate the range and apply
 * any pending size extension.
 */
static int
xfs_falloc_allocate_range(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	loff_t			new_size = 0;
	int			error;

	/*
	 * If always_cow mode we can't use preallocations and thus should not
	 * create them.
	 */
	if (xfs_is_always_cow_inode(XFS_I(inode)))
		return -EOPNOTSUPP;

	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
	if (error)
		return error;

	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
	if (error)
		return error;
	return xfs_falloc_setsize(file, new_size);
}
/* fallocate modes implemented by XFS; anything else returns -EOPNOTSUPP. */
#define	XFS_FALLOC_FL_SUPPORTED						\
	(FALLOC_FL_ALLOCATE_RANGE | FALLOC_FL_KEEP_SIZE |		\
	 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |		\
	 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE |		\
	 FALLOC_FL_UNSHARE_RANGE)
/*
 * Common fallocate implementation: takes the iolock and mmaplock, breaks
 * layouts, drains AIO, and dispatches on the requested mode.  @ac is the
 * zoned allocation context, or NULL for non-zoned file systems.
 */
STATIC long
__xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len,
	struct xfs_zone_alloc_ctx *ac)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold.  We must do this first because AIO can update both
	 * the on disk and in memory inode sizes, and the operations that follow
	 * require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	error = file_modified(file);
	if (error)
		goto out_unlock;

	switch (mode & FALLOC_FL_MODE_MASK) {
	case FALLOC_FL_PUNCH_HOLE:
		error = xfs_free_file_space(ip, offset, len, ac);
		break;
	case FALLOC_FL_COLLAPSE_RANGE:
		error = xfs_falloc_collapse_range(file, offset, len, ac);
		break;
	case FALLOC_FL_INSERT_RANGE:
		error = xfs_falloc_insert_range(file, offset, len);
		break;
	case FALLOC_FL_ZERO_RANGE:
		error = xfs_falloc_zero_range(file, mode, offset, len, ac);
		break;
	case FALLOC_FL_UNSHARE_RANGE:
		error = xfs_falloc_unshare_range(file, mode, offset, len);
		break;
	case FALLOC_FL_ALLOCATE_RANGE:
		error = xfs_falloc_allocate_range(file, mode, offset, len);
		break;
	default:
		error = -EOPNOTSUPP;
		break;
	}

	if (!error && xfs_file_sync_writes(file))
		error = xfs_log_force_inode(ip);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
/*
 * fallocate wrapper for zoned file systems: reserve the blocks needed for
 * out-of-place partial-block zeroing (see xfs_file_fallocate()) before the
 * common implementation takes the iolock.
 */
static long
xfs_file_zoned_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_zone_alloc_ctx ac = { };
	struct xfs_inode	*ip = XFS_I(file_inode(file));
	int			error;

	error = xfs_zoned_space_reserve(ip->i_mount, 2, XFS_ZR_RESERVED, &ac);
	if (error)
		return error;
	error = __xfs_file_fallocate(file, mode, offset, len, &ac);
	xfs_zoned_space_unreserve(ip->i_mount, &ac);
	return error;
}
/*
 * ->fallocate entry point: validate the request, then route zeroing modes on
 * zoned file systems through the space-reserving wrapper.
 */
static long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/*
	 * For zoned file systems, zeroing the first and last block of a hole
	 * punch requires allocating a new block to rewrite the remaining data
	 * and new zeroes out of place.  Get a reservations for those before
	 * taking the iolock.  Dip into the reserved pool because we are
	 * expected to be able to punch a hole even on a completely full
	 * file system.
	 */
	if (xfs_is_zoned_inode(XFS_I(inode)) &&
	    (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		     FALLOC_FL_COLLAPSE_RANGE)))
		return xfs_file_zoned_fallocate(file, mode, offset, len);
	return __xfs_file_fallocate(file, mode, offset, len, NULL);
}
/*
 * ->fadvise entry point.  WILLNEED populates the page cache, so it is run
 * under the shared iolock to serialise against hole punching and similar
 * operations; all other advice goes straight to generic_fadvise().
 */
STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}
/*
 * ->remap_file_range entry point: reflink/dedupe a range from @file_in to
 * @file_out.  Both inodes are locked by xfs_reflink_remap_prep() and unlocked
 * on the way out.
 */
STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_has_reflink(mp))
		return -EOPNOTSUPP;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_remapping(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	/*
	 * If the caller did not set CAN_SHORTEN, then it is not prepared to
	 * handle partial results -- either the whole remap succeeds, or we
	 * must say why it did not.  In this case, any error should be returned
	 * to the caller.
	 */
	if (ret && remapped < len && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
		return ret;
	return remapped > 0 ? remapped : ret;
}
/*
 * ->open for regular files: advertise nowait/O_DIRECT support and, where the
 * inode reports a non-zero atomic write minimum, atomic write capability.
 */
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
	if (xfs_get_atomic_write_min(XFS_I(inode)) > 0)
		file->f_mode |= FMODE_CAN_ATOMIC_WRITE;
	return generic_file_open(inode, file);
}
/*
 * ->open for directories: kick off readahead of the first data block since a
 * readdir is almost certainly next.
 */
STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	unsigned int	mode;
	int		error;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;
	error = generic_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, 0);
	xfs_iunlock(ip, mode);
	return error;
}
/*
 * Don't bother propagating errors.  We're just doing cleanup, and the caller
 * ignores the return value anyway.
 */
STATIC int
xfs_file_release(
	struct inode		*inode,
	struct file		*file)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * If this is a read-only mount or the file system has been shut down,
	 * don't generate I/O.
	 */
	if (xfs_is_readonly(mp) || xfs_is_shutdown(mp))
		return 0;

	/*
	 * If we previously truncated this file and removed old data in the
	 * process, we want to initiate "early" writeout on the last close.
	 * This is an attempt to combat the notorious NULL files problem which
	 * is particularly noticeable from a truncate down, buffered (re-)write
	 * (delalloc), followed by a crash.  What we are effectively doing here
	 * is significantly reducing the time window where we'd otherwise be
	 * exposed to that problem.
	 */
	if (xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED)) {
		xfs_iflags_clear(ip, XFS_EOFBLOCKS_RELEASED);
		if (ip->i_delayed_blks > 0)
			filemap_flush(inode->i_mapping);
	}

	/*
	 * XFS aggressively preallocates post-EOF space to generate contiguous
	 * allocations for writers that append to the end of the file.
	 *
	 * To support workloads that close and reopen the file frequently, these
	 * preallocations usually persist after a close unless it is the first
	 * close for the inode.  This is a tradeoff to generate tightly packed
	 * data layouts for unpacking tarballs or similar archives that write
	 * one file after another without going back to it while keeping the
	 * preallocation for files that have recurring open/write/close cycles.
	 *
	 * This heuristic is skipped for inodes with the append-only flag as
	 * that flag is rather pointless for inodes written only once.
	 *
	 * There is no point in freeing blocks here for open but unlinked files
	 * as they will be taken care of by the inactivation path soon.
	 *
	 * When releasing a read-only context, don't flush data or trim post-EOF
	 * blocks.  This avoids open/read/close workloads from removing EOF
	 * blocks that other writers depend upon to reduce fragmentation.
	 *
	 * Inodes on the zoned RT device never have preallocations, so skip
	 * taking the locks below.
	 */
	if (!inode->i_nlink ||
	    !(file->f_mode & FMODE_WRITE) ||
	    (ip->i_diflags & XFS_DIFLAG_APPEND) ||
	    xfs_is_zoned_inode(ip))
		return 0;

	/*
	 * If we can't get the iolock just skip truncating the blocks past EOF
	 * because we could deadlock with the mmap_lock otherwise.  We'll get
	 * another chance to drop them once the last reference to the inode is
	 * dropped, so we'll never leak blocks permanently.
	 */
	if (!xfs_iflags_test(ip, XFS_EOFBLOCKS_RELEASED) &&
	    xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (xfs_can_free_eofblocks(ip) &&
		    !xfs_iflags_test_and_set(ip, XFS_EOFBLOCKS_RELEASED))
			xfs_free_eofblocks(ip);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return 0;
}
/* ->iterate_shared: hand readdir off to the XFS directory code. */
STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass down the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate it's
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}
/*
 * ->llseek: SEEK_HOLE/SEEK_DATA are implemented via iomap extent lookups;
 * everything else falls through to generic_file_llseek().
 */
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
/*
 * Common DAX fault handling with the MMAPLOCK already held by the caller.
 * Write faults without a COW page use the DAX write iomap ops; everything
 * else uses the read ops.  A NEEDDSYNC result triggers the sync-fault path.
 */
static inline vm_fault_t
xfs_dax_fault_locked(
	struct vm_fault		*vmf,
	unsigned int		order,
	bool			write_fault)
{
	vm_fault_t		ret;
	unsigned long		pfn;

	if (!IS_ENABLED(CONFIG_FS_DAX)) {
		/* Callers only reach here for IS_DAX() inodes; this is a bug. */
		ASSERT(0);
		return VM_FAULT_SIGBUS;
	}
	ret = dax_iomap_fault(vmf, order, &pfn, NULL,
			(write_fault && !vmf->cow_page) ?
				&xfs_dax_write_iomap_ops :
				&xfs_read_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);
	return ret;
}
/* Handle a DAX read fault under the shared MMAPLOCK. */
static vm_fault_t
xfs_dax_read_fault(
	struct vm_fault		*vmf,
	unsigned int		order)
{
	struct xfs_inode	*ip = XFS_I(file_inode(vmf->vma->vm_file));
	vm_fault_t		ret;

	trace_xfs_read_fault(ip, order);

	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	ret = xfs_dax_fault_locked(vmf, order, false);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);

	return ret;
}
/*
 * Locking for serialisation of IO during page faults.  This results in a lock
 * ordering of:
 *
 * mmap_lock (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_write_fault(
	struct vm_fault		*vmf,
	unsigned int		order,
	struct xfs_zone_alloc_ctx *ac)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	unsigned int		lock_mode = XFS_MMAPLOCK_SHARED;
	vm_fault_t		ret;

	trace_xfs_write_fault(ip, order);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);

	/*
	 * Normally we only need the shared mmaplock, but if a reflink remap is
	 * in progress we take the exclusive lock to wait for the remap to
	 * finish before taking a write fault.
	 */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	if (xfs_iflags_test(ip, XFS_IREMAPPING)) {
		xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
		lock_mode = XFS_MMAPLOCK_EXCL;
	}

	if (IS_DAX(inode))
		ret = xfs_dax_fault_locked(vmf, order, true);
	else
		ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops,
				ac);
	xfs_iunlock(ip, lock_mode);

	sb_end_pagefault(inode->i_sb);
	return ret;
}
/*
 * Write fault on a zoned file system: reserve zone space covering the folio
 * before taking the fault, and release whatever wasn't used afterwards.
 */
static vm_fault_t
xfs_write_fault_zoned(
	struct vm_fault		*vmf,
	unsigned int		order)
{
	struct xfs_inode	*ip = XFS_I(file_inode(vmf->vma->vm_file));
	unsigned int		len = folio_size(page_folio(vmf->page));
	struct xfs_zone_alloc_ctx ac = { };
	int			error;
	vm_fault_t		ret;

	/*
	 * This could over-allocate as it doesn't check for truncation.
	 *
	 * But as the overallocation is limited to less than a folio and will
	 * be released instantly that's just fine.
	 */
	error = xfs_zoned_space_reserve(ip->i_mount,
			XFS_B_TO_FSB(ip->i_mount, len), 0, &ac);
	if (error < 0)
		return vmf_fs_error(error);
	ret = __xfs_write_fault(vmf, order, &ac);
	xfs_zoned_space_unreserve(ip->i_mount, &ac);
	return ret;
}
/* Dispatch a write fault to the zoned or common implementation. */
static vm_fault_t
xfs_write_fault(
	struct vm_fault		*vmf,
	unsigned int		order)
{
	if (xfs_is_zoned_inode(XFS_I(file_inode(vmf->vma->vm_file))))
		return xfs_write_fault_zoned(vmf, order);
	return __xfs_write_fault(vmf, order, NULL);
}
static inline bool
xfs_is_write_fault(
struct vm_fault *vmf)
{
return (vmf->flags & FAULT_FLAG_WRITE) &&
(vmf->vma->vm_flags & VM_SHARED);
}
/* ->fault handler: DAX inodes get XFS-specific handling, others filemap_fault. */
static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);

	/* DAX can shortcut the normal fault path on write faults! */
	if (IS_DAX(inode)) {
		if (xfs_is_write_fault(vmf))
			return xfs_write_fault(vmf, 0);
		return xfs_dax_read_fault(vmf, 0);
	}

	trace_xfs_read_fault(XFS_I(inode), 0);
	return filemap_fault(vmf);
}
/* ->huge_fault handler: only DAX supports huge faults; others fall back. */
static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	unsigned int		order)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	if (xfs_is_write_fault(vmf))
		return xfs_write_fault(vmf, order);
	return xfs_dax_read_fault(vmf, order);
}
/* ->page_mkwrite: a write to an already-mapped page is a plain write fault. */
static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return xfs_write_fault(vmf, 0);
}
/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults.  In reality, it needs to serialise against truncate and
 * prepare memory for writing so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return xfs_write_fault(vmf, 0);
}
/* VM operations for mmap'd XFS files. */
static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};
/*
 * ->mmap_prepare: reject synchronous mappings the DAX device can't support,
 * then install the XFS VM ops; DAX mappings are hinted for huge pages.
 */
STATIC int
xfs_file_mmap_prepare(
	struct vm_area_desc	*desc)
{
	struct file		*file = desc->file;
	struct inode		*inode = file_inode(file);
	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if underneath dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file),
				      target->bt_daxdev))
		return -EOPNOTSUPP;

	file_accessed(file);
	desc->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(inode))
		desc->vm_flags |= VM_HUGEPAGE;
	return 0;
}
/* file_operations for regular XFS files. */
const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap_prepare	= xfs_file_mmap_prepare,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
	.fop_flags	= FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
			  FOP_BUFFER_WASYNC | FOP_DIO_PARALLEL_WRITE |
			  FOP_DONTCACHE,
};
/* file_operations for XFS directories. */
const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};