// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <trace/events/netfs.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/*
 * Writeback calls this when it finds a folio that needs uploading. This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void v9fs_begin_writeback(struct netfs_io_request *wreq)
{
	struct p9_fid *fid;
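
	/* Writeback has no struct file to take a fid from, so find one that
	 * is already open for writing on this inode; dirty pagecache implies
	 * such a fid should exist, hence the warning below if it doesn't.
	 */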
	fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
	if (!fid) {
		WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
			  wreq->inode->i_ino);
		return;
	}
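
	/* Each write subrequest must fit in a single 9P Twrite message, so
	 * cap it at the negotiated msize minus the 9P I/O header, and at
	 * the server-advertised iounit if the fid has one.
	 */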
	wreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
	if (fid->iounit)
		wreq->wsize = min(wreq->wsize, fid->iounit);
	wreq->netfs_priv = fid;
	wreq->io_streams[0].avail = true;
}

/*
 * Issue a subrequest to write to the server.
 */
static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct p9_fid *fid = subreq->rreq->netfs_priv;
	int err, len;

	len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
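	/* p9_client_write() returns the number of bytes it managed to send,
	 * setting err on failure.  Flag any forward progress so the netfs
	 * retry logic does not abandon a partially-written subrequest.
	 */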
	if (len > 0)
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	netfs_write_subrequest_terminated(subreq, len ?: err);
}

/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	unsigned long long pos = subreq->start + subreq->transferred;
	int total, err;

	total = p9_client_read(fid, pos, &subreq->io_iter, &err);

	/* if we just extended the file size, any portion not in
	 * cache won't be on server and is zeroes */
	if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
	    subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	if (pos + total >= i_size_read(rreq->inode))
		__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
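
	/* Account a successful transfer: advance the byte count and note
	 * that this subrequest made progress so that a retry resumes from
	 * the new position rather than being abandoned.
	 */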
	if (!err && total) {
		subreq->transferred += total;
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	}

	subreq->error = err;
	netfs_read_subreq_terminated(subreq);
}

/**
 * v9fs_init_request - Initialise a request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct p9_fid *fid;
	bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
			rreq->origin == NETFS_WRITETHROUGH ||
			rreq->origin == NETFS_UNBUFFERED_WRITE ||
			rreq->origin == NETFS_DIO_WRITE);

	if (rreq->origin == NETFS_WRITEBACK)
		return 0; /* We don't get the write handle until we find we
			   * have actually dirty data and not just
			   * copy-to-cache data.
			   */
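
	/* Use the fid attached to the open file when there is one; otherwise
	 * look a fid up by inode, insisting on a writable one when the
	 * request origin implies writes will be issued.
	 */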
	if (file) {
		fid = file->private_data;
		if (!fid)
			goto no_fid;
		p9_fid_get(fid);
	} else {
		fid = v9fs_fid_find_inode(rreq->inode, writing, INVALID_UID, true);
		if (!fid)
			goto no_fid;
	}
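
	/* As in v9fs_begin_writeback(), cap the write size at what fits in
	 * one 9P message (msize minus the I/O header), and at the fid's
	 * iounit if the server supplied one.
	 */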
	rreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
	if (fid->iounit)
		rreq->wsize = min(rreq->wsize, fid->iounit);

	/* we might need to read from a fid that was opened write-only
	 * for read-modify-write of page cache, use the writeback fid
	 * for that */
	WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE && !(fid->mode & P9_ORDWR));
	rreq->netfs_priv = fid;
	return 0;

no_fid:
	WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
		  rreq->inode->i_ino);
	return -EINVAL;
}

/**
 * v9fs_free_request - Cleanup request initialized by v9fs_init_request
 * @rreq: The I/O request to clean up
 */
static void v9fs_free_request(struct netfs_io_request *rreq)
{
	struct p9_fid *fid = rreq->netfs_priv;

	p9_fid_put(fid);
}
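
/*
 * Hooks handed to the netfs library, which drives read, write and
 * writeback requests and calls back into the routines above to move the
 * data over 9P.
 */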
const struct netfs_request_ops v9fs_req_ops = {
	.init_request		= v9fs_init_request,
	.free_request		= v9fs_free_request,
	.issue_read		= v9fs_issue_read,
	.begin_writeback	= v9fs_begin_writeback,
	.issue_write		= v9fs_issue_write,
};
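
/*
 * The address_space operations are all generic netfs/VFS helpers; direct
 * I/O is a no-op here because ->read_iter/->write_iter route O_DIRECT
 * through the netfs unbuffered paths instead.
 */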
const struct address_space_operations v9fs_addr_operations = {
	.read_folio		= netfs_read_folio,
	.readahead		= netfs_readahead,
	.dirty_folio		= netfs_dirty_folio,
	.release_folio		= netfs_release_folio,
	.invalidate_folio	= netfs_invalidate_folio,
	.direct_IO		= noop_direct_IO,
	.writepages		= netfs_writepages,
	.migrate_folio		= filemap_migrate_folio,
};