netfs: Fix ENOMEM handling in buffered reads

If netfs_read_to_pagecache() gets an error from either ->prepare_read() or
from netfs_prepare_read_iterator(), it needs to decrement ->nr_outstanding,
cancel the subrequest and break out of the issuing loop.  Currently, it
only does this for two of the cases, but there are two more that aren't
handled.

Fix this by moving the handling to a common place and jumping to it from
all four places.  This is in preference to inserting a wrapper around
netfs_prepare_read_iterator() as proposed by Dmitry Antipov[1].

Link: https://lore.kernel.org/r/20241202093943.227786-1-dmantipov@yandex.ru/ [1]

Fixes: ee4cdf7ba8 ("netfs: Speed up buffered reading")
Reported-by: syzbot+404b4b745080b6210c6c@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=404b4b745080b6210c6c
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241213135013.2964079-4-dhowells@redhat.com
Tested-by: syzbot+404b4b745080b6210c6c@syzkaller.appspotmail.com
cc: Dmitry Antipov <dmantipov@yandex.ru>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>

--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -275,22 +275,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 			netfs_stat(&netfs_n_rh_download);
 			if (rreq->netfs_ops->prepare_read) {
 				ret = rreq->netfs_ops->prepare_read(subreq);
-				if (ret < 0) {
-					atomic_dec(&rreq->nr_outstanding);
-					netfs_put_subrequest(subreq, false,
-							     netfs_sreq_trace_put_cancel);
-					break;
-				}
+				if (ret < 0)
+					goto prep_failed;
 				trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
 			}
 
 			slice = netfs_prepare_read_iterator(subreq);
-			if (slice < 0) {
-				atomic_dec(&rreq->nr_outstanding);
-				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
-				ret = slice;
-				break;
-			}
+			if (slice < 0)
+				goto prep_iter_failed;
 
 			rreq->netfs_ops->issue_read(subreq);
 			goto done;
@@ -302,6 +294,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 			trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 			netfs_stat(&netfs_n_rh_zero);
 			slice = netfs_prepare_read_iterator(subreq);
+			if (slice < 0)
+				goto prep_iter_failed;
 			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 			netfs_read_subreq_terminated(subreq, 0, false);
 			goto done;
@@ -310,6 +304,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 		if (source == NETFS_READ_FROM_CACHE) {
 			trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 			slice = netfs_prepare_read_iterator(subreq);
+			if (slice < 0)
+				goto prep_iter_failed;
 			netfs_read_cache_to_pagecache(rreq, subreq);
 			goto done;
 		}
@@ -318,6 +314,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 		WARN_ON_ONCE(1);
 		break;
 
+	prep_iter_failed:
+		ret = slice;
+	prep_failed:
+		subreq->error = ret;
+		atomic_dec(&rreq->nr_outstanding);
+		netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+		break;
+
 	done:
 		size -= slice;
 		start += slice;
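
The shape of the fix is the classic consolidated-goto error path, with one label falling through into the next. Below is a minimal userspace sketch of the same control flow; it is not netfs code, and every name in it (fake_prepare_read, fake_prepare_iterator, issue_loop, struct request) is a hypothetical stand-in for the kernel structures in the diff above.

/* Minimal userspace sketch of the consolidated error path above.
 * All names are hypothetical stand-ins, not netfs API.
 */
#include <errno.h>
#include <stdio.h>

struct request {
	int nr_outstanding;	/* stands in for rreq->nr_outstanding */
	int error;		/* stands in for subreq->error */
};

/* Pretend ->prepare_read(): fails on step 3. */
static int fake_prepare_read(int step)
{
	return step == 3 ? -ENOMEM : 0;
}

/* Pretend netfs_prepare_read_iterator(): returns a slice size on
 * success; fails on step 2 to exercise the fall-through path. */
static int fake_prepare_iterator(int step)
{
	return step == 2 ? -ENOMEM : 64;
}

static int issue_loop(struct request *rreq, int nsteps)
{
	int ret = 0, slice;

	for (int step = 0; step < nsteps; step++) {
		rreq->nr_outstanding++;

		ret = fake_prepare_read(step);
		if (ret < 0)
			goto prep_failed;

		slice = fake_prepare_iterator(step);
		if (slice < 0)
			goto prep_iter_failed;

		printf("step %d issued, slice %d\n", step, slice);
		continue;

	prep_iter_failed:
		ret = slice;		/* convert the iterator result... */
	prep_failed:
		rreq->error = ret;	/* ...then share the cleanup */
		rreq->nr_outstanding--;
		break;
	}
	return ret;
}

int main(void)
{
	struct request rreq = { 0, 0 };
	int ret = issue_loop(&rreq, 5);

	printf("ret=%d outstanding=%d error=%d\n",
	       ret, rreq.nr_outstanding, rreq.error);
	return 0;
}

The fall-through is the point of the arrangement: only the iterator path needs to convert slice into ret, and everything after that is identical, so the decrement-and-cancel sequence exists in exactly one place no matter which of the four call sites failed.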