Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-04-13 09:59:31 +00:00)
9p/trans_fd: mark concurrent read and writes to p9_conn->err
Writes to the error value of a connection are spinlock-protected inside p9_conn_cancel, but lockless reads are present elsewhere to avoid performing unnecessary work after an error has occurred. Mark the write and the lockless reads to make KCSAN happy. While at it, mark the write as exclusive, following the recommendation in "Lock-Protected Writes with Lockless Reads" in tools/memory-model/Documentation/access-marking.txt. Also mark the m->err reads in p9_fd_request and p9_conn_cancel for stylistic reasons, even though they do not race with concurrent writes.

Reported-by: syzbot+d69a7cc8c683c2cb7506@syzkaller.appspotmail.com
Reported-by: syzbot+483d6c9b9231ea7e1851@syzkaller.appspotmail.com
Signed-off-by: Ignacio Encinas <ignacio@iencinas.com>
Message-ID: <20250318-p9_conn_err_benign_data_race-v3-1-290bb18335cc@iencinas.com>
Signed-off-by: Dominique Martinet <asmadeus@codewreck.org>
parent ad6d4558a7
commit fbc0283fbe
1 changed file with 10 additions and 7 deletions
net/9p/trans_fd.c

@@ -192,12 +192,13 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
 
 	spin_lock(&m->req_lock);
 
-	if (m->err) {
+	if (READ_ONCE(m->err)) {
 		spin_unlock(&m->req_lock);
 		return;
 	}
 
-	m->err = err;
+	WRITE_ONCE(m->err, err);
+	ASSERT_EXCLUSIVE_WRITER(m->err);
 
 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
 		list_move(&req->req_list, &cancel_list);
@@ -284,7 +285,7 @@ static void p9_read_work(struct work_struct *work)
 
 	m = container_of(work, struct p9_conn, rq);
 
-	if (m->err < 0)
+	if (READ_ONCE(m->err) < 0)
 		return;
 
 	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
@@ -451,7 +452,7 @@ static void p9_write_work(struct work_struct *work)
 
 	m = container_of(work, struct p9_conn, wq);
 
-	if (m->err < 0) {
+	if (READ_ONCE(m->err) < 0) {
 		clear_bit(Wworksched, &m->wsched);
 		return;
 	}
@@ -623,7 +624,7 @@ static void p9_poll_mux(struct p9_conn *m)
 	__poll_t n;
 	int err = -ECONNRESET;
 
-	if (m->err < 0)
+	if (READ_ONCE(m->err) < 0)
 		return;
 
 	n = p9_fd_poll(m->client, NULL, &err);
@@ -666,6 +667,7 @@ static void p9_poll_mux(struct p9_conn *m)
 static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 {
 	__poll_t n;
+	int err;
 	struct p9_trans_fd *ts = client->trans;
 	struct p9_conn *m = &ts->conn;
 
@@ -674,9 +676,10 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 
 	spin_lock(&m->req_lock);
 
-	if (m->err < 0) {
+	err = READ_ONCE(m->err);
+	if (err < 0) {
 		spin_unlock(&m->req_lock);
-		return m->err;
+		return err;
 	}
 
 	WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
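For reference, the access-marking pattern applied by this patch can be reduced to a small kernel-style sketch. This is an illustrative example only, not code from the patch: the names struct conn, conn_fail and conn_usable are hypothetical. It follows the "Lock-Protected Writes with Lockless Reads" recipe from tools/memory-model/Documentation/access-marking.txt: the error field is written under the spinlock with WRITE_ONCE(), documented as having a single (locked) writer via ASSERT_EXCLUSIVE_WRITER(), and read with READ_ONCE() on the lockless fast paths.

/*
 * Minimal sketch of the marking pattern used in this commit.
 * Names (struct conn, conn_fail, conn_usable) are illustrative only;
 * this is kernel-style C meant to be compiled in-tree.
 */
#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <linux/kcsan-checks.h>	/* ASSERT_EXCLUSIVE_WRITER() */
#include <linux/spinlock.h>

struct conn {
	spinlock_t lock;
	int err;	/* 0 or negative errno; written under lock, read locklessly */
};

/* Writer: the error is only ever set while holding conn->lock. */
static void conn_fail(struct conn *c, int err)
{
	spin_lock(&c->lock);
	if (READ_ONCE(c->err)) {		/* already failed, nothing to do */
		spin_unlock(&c->lock);
		return;
	}
	WRITE_ONCE(c->err, err);		/* marked write, pairs with lockless reads */
	ASSERT_EXCLUSIVE_WRITER(c->err);	/* tell KCSAN no other writer may race */
	spin_unlock(&c->lock);
}

/* Lockless reader: bail out early once an error has been recorded. */
static bool conn_usable(struct conn *c)
{
	return READ_ONCE(c->err) >= 0;		/* marked lockless read */
}

The READ_ONCE() inside conn_fail() mirrors the commit's choice of also marking the locked reads in p9_conn_cancel and p9_fd_request: they cannot race with the write, but marking them keeps every access to the field visibly annotated.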