// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest result collection, assessment and
 * retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/* Notes made in the collector */
#define HIT_PENDING		0x01	/* A front op was still pending */
#define MADE_PROGRESS		0x04	/* Made progress cleaning up a stream or the folio set */
#define BUFFERED		0x08	/* The pagecache needs cleaning up */
#define NEED_RETRY		0x10	/* A front op requests retrying */
#define COPY_TO_CACHE		0x40	/* Need to copy subrequest to cache */
#define ABANDON_SREQ		0x80	/* Need to abandon untransferred part of subrequest */

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	netfs_reset_iter(subreq);
	WARN_ON_ONCE(subreq->len - subreq->transferred != iov_iter_count(&subreq->io_iter));
	iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
	if (subreq->start + subreq->transferred >= subreq->rreq->i_size)
		__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
}

/*
 * Flush, mark and unlock a folio that's now completely read. If we want to
 * cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it
 * dirty and let writeback handle it.
 */
static void netfs_unlock_read_folio(struct netfs_io_request *rreq,
				    struct folio_queue *folioq,
				    int slot)
{
	struct netfs_folio *finfo;
	struct folio *folio = folioq_folio(folioq, slot);

	if (unlikely(folio_pos(folio) < rreq->abandon_to)) {
		trace_netfs_folio(folio, netfs_folio_trace_abandon);
		goto just_unlock;
	}

	flush_dcache_folio(folio);
	folio_mark_uptodate(folio);

	if (!test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
		finfo = netfs_folio_info(folio);
		if (finfo) {
			trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
			if (finfo->netfs_group)
				folio_change_private(folio, finfo->netfs_group);
			else
				folio_detach_private(folio);
			kfree(finfo);
		}

		if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags)) {
			if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
				trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
				folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
				folio_mark_dirty(folio);
			}
		} else {
			trace_netfs_folio(folio, netfs_folio_trace_read_done);
		}

		folioq_clear(folioq, slot);
	} else {
		// TODO: Use of PG_private_2 is deprecated.
		if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags))
			netfs_pgpriv2_copy_to_cache(rreq, folio);
	}

just_unlock:
	if (folio->index == rreq->no_unlock_folio &&
	    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
		_debug("no unlock");
	} else {
		trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
		folio_unlock(folio);
	}

	folioq_clear(folioq, slot);
}

/*
 * Unlock any folios we've finished with.
 */
static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
				     unsigned int *notes)
{
	struct folio_queue *folioq = rreq->buffer.tail;
	unsigned long long collected_to = rreq->collected_to;
	unsigned int slot = rreq->buffer.first_tail_slot;

	if (rreq->cleaned_to >= rreq->collected_to)
		return;

	// TODO: Begin decryption

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = rolling_buffer_delete_spent(&rreq->buffer);
		if (!folioq) {
			rreq->front_folio_order = 0;
			return;
		}
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		unsigned int order;
		size_t fsize;

		if (*notes & COPY_TO_CACHE)
			set_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_locked(folio),
			      "R=%08x: folio %lx is not locked\n",
			      rreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_locked);

		order = folioq_folio_order(folioq, slot);
		rreq->front_folio_order = order;
		fsize = PAGE_SIZE << order;
		fpos = folio_pos(folio);
		fend = umin(fpos + fsize, rreq->i_size);

		trace_netfs_collect_folio(rreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		netfs_unlock_read_folio(rreq, folioq, slot);
		WRITE_ONCE(rreq->cleaned_to, fpos + fsize);
		*notes |= MADE_PROGRESS;

		clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);

		/* Clean up the head folioq. If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			folioq = rolling_buffer_delete_spent(&rreq->buffer);
			if (!folioq)
				goto done;
			slot = 0;
			trace_netfs_folioq(folioq, netfs_trace_folioq_read_progress);
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	rreq->buffer.tail = folioq;
done:
	rreq->buffer.first_tail_slot = slot;
}

/*
 * Collect and assess the results of various read subrequests. We may need to
 * retry some of the results.
 *
 * Note that we have a sequence of subrequests, which may be drawing on
 * different sources and may or may not be the same size or starting position
 * and may not even correspond in boundary alignment.
 */
static void netfs_collect_read_results(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *front, *remove;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	unsigned int notes;

	_enter("%llx-%llx", rreq->start, rreq->start + rreq->len);
	trace_netfs_rreq(rreq, netfs_rreq_trace_collect);
	trace_netfs_collect(rreq);

reassess:
	if (rreq->origin == NETFS_READAHEAD ||
	    rreq->origin == NETFS_READPAGE ||
	    rreq->origin == NETFS_READ_FOR_WRITE)
		notes = BUFFERED;
	else
		notes = 0;

	/* Remove completed subrequests from the front of the stream and
	 * advance the completion point. We stop when we hit something that's
	 * in progress. The issuer thread may be adding stuff to the tail
	 * whilst we're doing this.
	 */
	front = READ_ONCE(stream->front);
	while (front) {
		size_t transferred;

		trace_netfs_collect_sreq(rreq, front);
		_debug("sreq [%x] %llx %zx/%zx",
		       front->debug_index, front->start, front->transferred, front->len);

		if (stream->collected_to < front->start) {
			trace_netfs_collect_gap(rreq, stream, front->start, 'F');
			stream->collected_to = front->start;
		}

		if (netfs_check_subreq_in_progress(front))
			notes |= HIT_PENDING;
		smp_rmb(); /* Read counters after IN_PROGRESS flag. */
		transferred = READ_ONCE(front->transferred);

		/* If we can now collect the next folio, do so. We don't want
		 * to defer this as we have to decide whether we need to copy
		 * to the cache or not, and that may differ between adjacent
		 * subreqs.
		 */
		if (notes & BUFFERED) {
			size_t fsize = PAGE_SIZE << rreq->front_folio_order;

			/* Clear the tail of a short read. */
			if (!(notes & HIT_PENDING) &&
			    front->error == 0 &&
			    transferred < front->len &&
			    (test_bit(NETFS_SREQ_HIT_EOF, &front->flags) ||
			     test_bit(NETFS_SREQ_CLEAR_TAIL, &front->flags))) {
				netfs_clear_unread(front);
				transferred = front->transferred = front->len;
				trace_netfs_sreq(front, netfs_sreq_trace_clear);
			}

			stream->collected_to = front->start + transferred;
			rreq->collected_to = stream->collected_to;

			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &front->flags))
				notes |= COPY_TO_CACHE;

			if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
				rreq->abandon_to = front->start + front->len;
				front->transferred = front->len;
				transferred = front->len;
				trace_netfs_rreq(rreq, netfs_rreq_trace_set_abandon);
			}
			if (front->start + transferred >= rreq->cleaned_to + fsize ||
			    test_bit(NETFS_SREQ_HIT_EOF, &front->flags))
				netfs_read_unlock_folios(rreq, &notes);
		} else {
			stream->collected_to = front->start + transferred;
			rreq->collected_to = stream->collected_to;
		}

		/* Stall if the front is still undergoing I/O. */
		if (notes & HIT_PENDING)
			break;

		if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
			if (!stream->failed) {
				stream->error = front->error;
				rreq->error = front->error;
				set_bit(NETFS_RREQ_FAILED, &rreq->flags);
				stream->failed = true;
			}
			notes |= MADE_PROGRESS | ABANDON_SREQ;
		} else if (test_bit(NETFS_SREQ_NEED_RETRY, &front->flags)) {
			stream->need_retry = true;
			notes |= NEED_RETRY | MADE_PROGRESS;
			break;
		} else if (test_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags)) {
			notes |= MADE_PROGRESS;
		} else {
			if (!stream->failed)
				stream->transferred += transferred;
			if (front->transferred < front->len)
				set_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags);
			notes |= MADE_PROGRESS;
		}

		/* Remove if completely consumed. */
		stream->source = front->source;
		spin_lock(&rreq->lock);

		remove = front;
		trace_netfs_sreq(front,
				 notes & ABANDON_SREQ ?
				 netfs_sreq_trace_abandoned : netfs_sreq_trace_consumed);
		list_del_init(&front->rreq_link);
		front = list_first_entry_or_null(&stream->subrequests,
						 struct netfs_io_subrequest, rreq_link);
		stream->front = front;
		spin_unlock(&rreq->lock);
		netfs_put_subrequest(remove,
				     notes & ABANDON_SREQ ?
				     netfs_sreq_trace_put_abandon :
				     netfs_sreq_trace_put_done);
	}

	trace_netfs_collect_stream(rreq, stream);
	trace_netfs_collect_state(rreq, rreq->collected_to, notes);

	if (!(notes & BUFFERED))
		rreq->cleaned_to = rreq->collected_to;

	if (notes & NEED_RETRY)
		goto need_retry;
	if (notes & MADE_PROGRESS) {
		netfs_wake_rreq_flag(rreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
		//cond_resched();
		goto reassess;
	}

out:
	_leave(" = %x", notes);
	return;

need_retry:
	/* Okay... We're going to have to retry parts of the stream. Note
	 * that any partially completed op will have had any wholly transferred
	 * folios removed from it.
	 */
	_debug("retry");
	netfs_retry_reads(rreq);
	goto out;
}

/*
 * Do page flushing and suchlike after DIO.
 */
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{
	unsigned int i;

	if (rreq->origin == NETFS_UNBUFFERED_READ ||
	    rreq->origin == NETFS_DIO_READ) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			flush_dcache_page(rreq->direct_bv[i].bv_page);
			// TODO: cifs marks pages in the destination buffer
			// dirty under some circumstances after a read. Do we
			// need to do that too?
			set_page_dirty(rreq->direct_bv[i].bv_page);
		}
	}

	if (rreq->iocb) {
		rreq->iocb->ki_pos += rreq->transferred;
		if (rreq->iocb->ki_complete) {
			trace_netfs_rreq(rreq, netfs_rreq_trace_ki_complete);
			rreq->iocb->ki_complete(
				rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
		}
	}
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
	if (rreq->origin == NETFS_UNBUFFERED_READ ||
	    rreq->origin == NETFS_DIO_READ)
		inode_dio_end(rreq->inode);
}

/*
 * Do processing after reading a monolithic single object.
 */
static void netfs_rreq_assess_single(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];

	if (!rreq->error && stream->source == NETFS_DOWNLOAD_FROM_SERVER &&
	    fscache_resources_valid(&rreq->cache_resources)) {
		trace_netfs_rreq(rreq, netfs_rreq_trace_dirty);
		netfs_single_mark_inode_dirty(rreq->inode);
	}

	if (rreq->iocb) {
		rreq->iocb->ki_pos += rreq->transferred;
		if (rreq->iocb->ki_complete) {
			trace_netfs_rreq(rreq, netfs_rreq_trace_ki_complete);
			rreq->iocb->ki_complete(
				rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
		}
	}
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}

/*
 * Perform the collection of subrequests and folios.
 *
 * Note that we're in normal kernel thread context at this point, possibly
 * running on a workqueue.
 */
bool netfs_read_collection(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];

	netfs_collect_read_results(rreq);

	/* We're done when the app thread has finished posting subreqs and the
	 * queue is empty.
	 */
	if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
		return false;
	smp_rmb(); /* Read ALL_QUEUED before subreq lists. */

	if (!list_empty(&stream->subrequests))
		return false;

	/* Okay, declare that all I/O is complete. */
	rreq->transferred = stream->transferred;
	trace_netfs_rreq(rreq, netfs_rreq_trace_complete);

	//netfs_rreq_is_still_valid(rreq);

	switch (rreq->origin) {
	case NETFS_UNBUFFERED_READ:
	case NETFS_DIO_READ:
	case NETFS_READ_GAPS:
		netfs_rreq_assess_dio(rreq);
		break;
	case NETFS_READ_SINGLE:
		netfs_rreq_assess_single(rreq);
		break;
	default:
		break;
	}
	task_io_account_read(rreq->transferred);

	netfs_wake_rreq_flag(rreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
	/* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */

	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq);
	netfs_unlock_abandoned_read_pages(rreq);
	if (unlikely(rreq->copy_to_cache))
		netfs_pgpriv2_end_copy_to_cache(rreq);
	return true;
}

void netfs_read_collection_worker(struct work_struct *work)
{
	struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);

	netfs_see_request(rreq, netfs_rreq_trace_see_work);
	if (netfs_check_rreq_in_progress(rreq)) {
		if (netfs_read_collection(rreq))
			/* Drop the ref from the IN_PROGRESS flag. */
			netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
		else
			netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
	}
}

/**
 * netfs_read_subreq_progress - Note progress of a read operation.
 * @subreq: The read request that has made progress.
 *
 * This tells the read side of netfs lib that a contributory I/O operation has
 * made some progress and that it may be possible to unlock some folios.
 *
 * Before calling, the filesystem should update subreq->transferred to track
 * the amount of data copied into the output buffer.
 */
void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	size_t fsize = PAGE_SIZE << rreq->front_folio_order;

	trace_netfs_sreq(subreq, netfs_sreq_trace_progress);

	/* If we are at the head of the queue, wake up the collector,
	 * getting a ref to it if we were the ones to do so.
	 */
	if (subreq->start + subreq->transferred > rreq->cleaned_to + fsize &&
	    (rreq->origin == NETFS_READAHEAD ||
	     rreq->origin == NETFS_READPAGE ||
	     rreq->origin == NETFS_READ_FOR_WRITE) &&
	    list_is_first(&subreq->rreq_link, &stream->subrequests)
	    ) {
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		netfs_wake_collector(rreq);
	}
}
EXPORT_SYMBOL(netfs_read_subreq_progress);
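
/*
 * Illustrative sketch (not part of this file): a network filesystem that
 * receives the data for a subrequest in several chunks might report each
 * chunk roughly like this so that fully read folios can be unlocked early.
 * The my_fs_* names and the call structure are hypothetical.
 *
 *	static void my_fs_received_chunk(struct my_fs_call *call, size_t len)
 *	{
 *		struct netfs_io_subrequest *subreq = call->subreq;
 *
 *		// Account for the data copied into the buffer first, then
 *		// poke the read collector.
 *		subreq->transferred += len;
 *		netfs_read_subreq_progress(subreq);
 *	}
 */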

/**
 * netfs_read_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates the outcome of the operation through @subreq->error,
 * supplying 0 to indicate a successful or retryable transfer (if
 * NETFS_SREQ_NEED_RETRY is set) or a negative error code. The helper will
 * look after reissuing I/O operations as appropriate and writing downloaded
 * data to the cache.
 *
 * Before calling, the filesystem should update subreq->transferred to track
 * the amount of data copied into the output buffer.
 */
void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	/* Deal with retry requests, short reads and errors. If we retry
	 * but don't make progress, we abandon the attempt.
	 */
	if (!subreq->error && subreq->transferred < subreq->len) {
		if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
		} else if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_need_clear);
		} else if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_need_retry);
		} else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
			__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			trace_netfs_sreq(subreq, netfs_sreq_trace_partial_read);
		} else {
			__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
			subreq->error = -ENODATA;
			trace_netfs_sreq(subreq, netfs_sreq_trace_short);
		}
	}

	if (unlikely(subreq->error < 0)) {
		trace_netfs_failure(rreq, subreq, subreq->error, netfs_fail_read);
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			netfs_stat(&netfs_n_rh_read_failed);
			__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		} else {
			netfs_stat(&netfs_n_rh_download_failed);
			__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		}
		trace_netfs_rreq(rreq, netfs_rreq_trace_set_pause);
		set_bit(NETFS_RREQ_PAUSE, &rreq->flags);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
	netfs_subreq_clear_in_progress(subreq);
	netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_read_subreq_terminated);
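
/*
 * Illustrative sketch (not part of this file): a filesystem's server-read
 * completion handler might finish a subrequest roughly like this, mirroring
 * what netfs_cache_read_terminated() below does for cache reads. The
 * my_fs_* names and fields are hypothetical.
 *
 *	static void my_fs_read_done(struct my_fs_call *call)
 *	{
 *		struct netfs_io_subrequest *subreq = call->subreq;
 *
 *		if (call->error < 0) {
 *			subreq->error = call->error;
 *		} else {
 *			subreq->transferred += call->bytes_received;
 *			if (call->bytes_received)
 *				__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
 *		}
 *		netfs_read_subreq_terminated(subreq);
 *	}
 */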

/*
 * Handle termination of a read from the cache.
 */
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error)
{
	struct netfs_io_subrequest *subreq = priv;

	if (transferred_or_error > 0) {
		subreq->error = 0;
		if (transferred_or_error > 0) {
			subreq->transferred += transferred_or_error;
			__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		}
	} else {
		subreq->error = transferred_or_error;
	}
	netfs_read_subreq_terminated(subreq);
}
|