// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
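
/*
 * ntfs_ioctl_fitrim - Handle the FITRIM ioctl.
 *
 * Copies an fstrim_range from user space, raises the minimum extent
 * length to the device discard granularity, trims free clusters via
 * ntfs_trim_fs() and copies the updated range back to user space.
 */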
static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ntfs_i(inode))))
		return -EINVAL;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}
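
/*
 * ntfs_extend_initialized_size - Extend the initialized ("valid") size.
 *
 * Zeroes the byte range [valid, new_valid) so that the initialized size
 * can grow to new_valid. Resident files only need i_valid updated.
 */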
static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (valid >= new_valid)
		return 0;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));
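
	/*
	 * Zero page tails from the old initialized size up to new_valid,
	 * skipping over sparse (unallocated) cluster runs.
	 */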
	for (;;) {
		u32 zerofrom, len;
		struct folio *folio;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL);
		if (err)
			goto out;

		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

		err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;
	bool dirty = false;

	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		bh = head;
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);
		dirty = true;

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	if (dirty)
		mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_file_mmap_prepare - file_operations::mmap_prepare
 */
static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
	bool rw = desc->vm_flags & VM_WRITE;
	int err;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + desc->end - desc->start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			if (!inode_trylock(inode)) {
				err = -EAGAIN;
				goto out;
			}
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap_prepare(desc);
out:
	return err;
}
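
/*
 * ntfs_extend - Extend file size and/or initialized size for a write.
 *
 * For a write of 'count' bytes at 'pos': grows i_size if needed, extends
 * the initialized size of ordinary files, and preallocates clusters for
 * sparse files to optimize large writes.
 */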
static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	if (file && is_sparsed(ni)) {
		/*
		 * This code optimizes large writes to sparse file.
		 * TODO: merge this fragment with fallocate fragment.
		 */
		struct ntfs_sb_info *sbi = ni->mi.sbi;
		CLST vcn = pos >> sbi->cluster_bits;
		CLST cend = bytes_to_cluster(sbi, end);
		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
		CLST lcn, clen;
		bool new;

		if (cend_v > cend)
			cend_v = cend;

		/*
		 * Allocate and zero new clusters.
		 * Zeroing these clusters may be too long.
		 */
		for (; vcn < cend_v; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
						  &clen, &new, true);
			if (err)
				goto out;
		}
		/*
		 * Allocate but not zero new clusters.
		 */
		for (; vcn < cend; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
			if (err)
				goto out;
		}
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}
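
/*
 * ntfs_truncate - Shrink a regular file to new_size.
 */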
static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}

/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process not aligned punch. */
		err = 0;
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted to preserve them
		 * when discarding page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Be sure file is non resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may be too long.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}

/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}

/*
 * check_read_restriction:
 * common code for ntfs_file_read_iter and ntfs_file_splice_read
 */
static int check_read_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page **pages = NULL;
	struct folio *folio;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					folio = page_folio(pages[ip]);
					folio_unlock(folio);
					folio_put(folio);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			folio = page_folio(pages[ip]);
			folio_zero_segment(folio, off, PAGE_SIZE);
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						folio = page_folio(pages[ip]);
						folio_unlock(folio);
						folio_put(folio);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			folio = page_folio(pages[ip]);
			cp = copy_folio_from_iter_atomic(folio, off,
							 min(tail, bytes), from);
			flush_dcache_folio(folio);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_clear_dirty(folio);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}

/*
 * check_write_restriction:
 * common code for ntfs_file_write_iter and ntfs_file_splice_write
 */
static int check_write_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t ret;
	int err;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}
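
	/*
	 * Check restrictions while the inode lock is held: this keeps the
	 * write atomic against ioctls that may clear the compress flag
	 * concurrently (see "fs/ntfs3: Keep write operations atomic").
	 */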
	ret = check_write_restriction(inode);
	if (ret)
		goto out;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc &&
	    ((file->f_mode & FMODE_WRITE) &&
	     atomic_read(&inode->i_writecount) == 1)
	    /*
	     * The only file when inode->i_fop = &ntfs_file_operations and
	     * init_rwsem(&ni->file.run_lock) is not called explicitly is MFT.
	     *
	     * Add additional check here.
	     */
	    && inode->i_ino != MFT_REC_MFT) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    i_size_read(inode), &ni->i_valid, false,
				    NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

/*
 * ntfs_file_splice_write - file_operations::splice_write
 */
static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *file, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t err;
	struct inode *inode = file_inode(file);

	err = check_write_restriction(inode);
	if (err)
		return err;

	return iter_file_splice_write(pipe, file, ppos, len, flags);
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= ntfs_file_splice_read,
	.splice_write	= ntfs_file_splice_write,
	.mmap_prepare	= ntfs_file_mmap_prepare,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};

#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.splice_read	= ntfs_file_splice_read,
	.open		= ntfs_file_open,
	.release	= ntfs_file_release,
};
#endif
// clang-format on