9p/trans_fd: mark concurrent reads and writes to p9_conn->err
author: Ignacio Encinas <ignacio@iencinas.com>
Tue, 18 Mar 2025 21:39:02 +0000 (22:39 +0100)
committer: Dominique Martinet <asmadeus@codewreck.org>
Wed, 19 Mar 2025 12:20:31 +0000 (21:20 +0900)
Writes for the error value of a connection are spinlock-protected inside
p9_conn_cancel, but lockless reads are present elsewhere to avoid
performing unnecessary work after an error has been met.

Mark the write and lockless reads to make KCSAN happy. Mark the write as
exclusive following the recommendation in "Lock-Protected Writes with
Lockless Reads" in tools/memory-model/Documentation/access-marking.txt
while we are at it.

Mark the m->err reads in p9_fd_request and p9_conn_cancel as well, for
stylistic reasons, even though they are lock-protected and do not race
with concurrent writes.

Reported-by: syzbot+d69a7cc8c683c2cb7506@syzkaller.appspotmail.com
Reported-by: syzbot+483d6c9b9231ea7e1851@syzkaller.appspotmail.com
Signed-off-by: Ignacio Encinas <ignacio@iencinas.com>
Message-ID: <20250318-p9_conn_err_benign_data_race-v3-1-290bb18335cc@iencinas.com>
Signed-off-by: Dominique Martinet <asmadeus@codewreck.org>
net/9p/trans_fd.c

index 2fea50bf047abd9f220233111bf4d26b60c0fd09..339ec4e54778f30904392e8bdb4455874d1da5d7 100644 (file)
@@ -192,12 +192,13 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
 
        spin_lock(&m->req_lock);
 
-       if (m->err) {
+       if (READ_ONCE(m->err)) {
                spin_unlock(&m->req_lock);
                return;
        }
 
-       m->err = err;
+       WRITE_ONCE(m->err, err);
+       ASSERT_EXCLUSIVE_WRITER(m->err);
 
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
@@ -284,7 +285,7 @@ static void p9_read_work(struct work_struct *work)
 
        m = container_of(work, struct p9_conn, rq);
 
-       if (m->err < 0)
+       if (READ_ONCE(m->err) < 0)
                return;
 
        p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
@@ -451,7 +452,7 @@ static void p9_write_work(struct work_struct *work)
 
        m = container_of(work, struct p9_conn, wq);
 
-       if (m->err < 0) {
+       if (READ_ONCE(m->err) < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }
@@ -623,7 +624,7 @@ static void p9_poll_mux(struct p9_conn *m)
        __poll_t n;
        int err = -ECONNRESET;
 
-       if (m->err < 0)
+       if (READ_ONCE(m->err) < 0)
                return;
 
        n = p9_fd_poll(m->client, NULL, &err);
@@ -666,6 +667,7 @@ static void p9_poll_mux(struct p9_conn *m)
 static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 {
        __poll_t n;
+       int err;
        struct p9_trans_fd *ts = client->trans;
        struct p9_conn *m = &ts->conn;
 
@@ -674,9 +676,10 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 
        spin_lock(&m->req_lock);
 
-       if (m->err < 0) {
+       err = READ_ONCE(m->err);
+       if (err < 0) {
                spin_unlock(&m->req_lock);
-               return m->err;
+               return err;
        }
 
        WRITE_ONCE(req->status, REQ_STATUS_UNSENT);