ksmbd: prevent connection release during oplock break notification

ksmbd_work could be freed after the connection has been released.
Increment r_count of ksmbd_conn to indicate that requests are not
finished yet and that the connection must not be released.

Cc: stable@vger.kernel.org
Reported-by: Norbert Szetei <norbert@doyensec.com>
Tested-by: Norbert Szetei <norbert@doyensec.com>
Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
commit 3aa660c059 (parent bb39ed4706)
Namjae Jeon, 2025-03-06 14:14:58 +09:00, committed by Steve French
4 changed files with 30 additions and 12 deletions
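
Why the counting matters: the oplock/lease break notifications run from a workqueue, so the queued ksmbd_work can still be executing after the request that scheduled it has completed; if the connection is torn down in that window, the work ends up touching a freed ksmbd_conn. Holding r_count for the lifetime of the queued work keeps the connection from being released until the notification is done. Below is a minimal userspace model of the counting pattern using C11 atomics; struct conn and the conn_r_count_* names are invented for this sketch and only mirror the counting scheme, not the kernel code.

/*
 * Minimal userspace model of the r_count/refcnt pattern (C11 atomics).
 * Names are illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
        atomic_int r_count; /* requests (or queued notifications) in flight */
        atomic_int refcnt;  /* references to the structure itself */
};

static void conn_r_count_inc(struct conn *c)
{
        atomic_fetch_add(&c->r_count, 1);
}

static void conn_r_count_dec(struct conn *c)
{
        /*
         * Take a temporary reference so this function can still touch *c
         * after dropping r_count (in the kernel: to wake a waiter) without
         * racing against a concurrent free.
         */
        atomic_fetch_add(&c->refcnt, 1);
        if (atomic_fetch_sub(&c->r_count, 1) == 1)
                printf("last in-flight request done; a waiter may now tear down\n");
        if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
                printf("last reference dropped; freeing conn\n");
                free(c);
        }
}

int main(void)
{
        struct conn *c = malloc(sizeof(*c));
        if (!c)
                return 1;
        atomic_init(&c->r_count, 0);
        atomic_init(&c->refcnt, 1);

        conn_r_count_inc(c);    /* e.g. before queuing break-notification work */
        conn_r_count_dec(c);    /* e.g. at the end of the work handler */

        if (atomic_fetch_sub(&c->refcnt, 1) == 1)  /* drop the initial reference */
                free(c);
        return 0;
}

The kernel helper ksmbd_conn_r_count_dec() follows the same shape: it takes a temporary conn->refcnt reference, drops r_count, wakes any waiter on conn->r_count_q, and only then releases the temporary reference, freeing the connection if it held the last one.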

fs/smb/server/connection.c

@@ -433,6 +433,26 @@ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
         default_conn_ops.terminate_fn = ops->terminate_fn;
 }
 
+void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn)
+{
+        atomic_inc(&conn->r_count);
+}
+
+void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
+{
+        /*
+         * Checking waitqueue to dropping pending requests on
+         * disconnection. waitqueue_active is safe because it
+         * uses atomic operation for condition.
+         */
+        atomic_inc(&conn->refcnt);
+        if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+                wake_up(&conn->r_count_q);
+
+        if (atomic_dec_and_test(&conn->refcnt))
+                kfree(conn);
+}
+
 int ksmbd_conn_transport_init(void)
 {
         int ret;

fs/smb/server/connection.h

@@ -168,6 +168,8 @@ int ksmbd_conn_transport_init(void);
 void ksmbd_conn_transport_destroy(void);
 void ksmbd_conn_lock(struct ksmbd_conn *conn);
 void ksmbd_conn_unlock(struct ksmbd_conn *conn);
+void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn);
+void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn);
 
 /*
  * WARNING

fs/smb/server/oplock.c

@@ -634,6 +634,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
 {
         struct smb2_oplock_break *rsp = NULL;
         struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+        struct ksmbd_conn *conn = work->conn;
         struct oplock_break_info *br_info = work->request_buf;
         struct smb2_hdr *rsp_hdr;
         struct ksmbd_file *fp;
@@ -689,6 +690,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
 
 out:
         ksmbd_free_work_struct(work);
+        ksmbd_conn_r_count_dec(conn);
 }
 
 /**
@@ -723,6 +725,7 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
         work->sess = opinfo->sess;
 
         if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+                ksmbd_conn_r_count_inc(conn);
                 INIT_WORK(&work->work, __smb2_oplock_break_noti);
                 ksmbd_queue_work(work);
 
@@ -744,6 +747,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
 {
         struct smb2_lease_break *rsp = NULL;
         struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+        struct ksmbd_conn *conn = work->conn;
         struct lease_break_info *br_info = work->request_buf;
         struct smb2_hdr *rsp_hdr;
 
@@ -790,6 +794,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
 
 out:
         ksmbd_free_work_struct(work);
+        ksmbd_conn_r_count_dec(conn);
 }
 
 /**
@@ -829,6 +834,7 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
         work->sess = opinfo->sess;
 
         if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+                ksmbd_conn_r_count_inc(conn);
                 INIT_WORK(&work->work, __smb2_lease_break_noti);
                 ksmbd_queue_work(work);
                 wait_for_break_ack(opinfo);

fs/smb/server/server.c

@@ -270,17 +270,7 @@ static void handle_ksmbd_work(struct work_struct *wk)
 
         ksmbd_conn_try_dequeue_request(work);
         ksmbd_free_work_struct(work);
-        /*
-         * Checking waitqueue to dropping pending requests on
-         * disconnection. waitqueue_active is safe because it
-         * uses atomic operation for condition.
-         */
-        atomic_inc(&conn->refcnt);
-        if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
-                wake_up(&conn->r_count_q);
-
-        if (atomic_dec_and_test(&conn->refcnt))
-                kfree(conn);
+        ksmbd_conn_r_count_dec(conn);
 }
 
 /**
@@ -310,7 +300,7 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
         conn->request_buf = NULL;
 
         ksmbd_conn_enqueue_request(work);
-        atomic_inc(&conn->r_count);
+        ksmbd_conn_r_count_inc(conn);
         /* update activity on connection */
         conn->last_active = jiffies;
         INIT_WORK(&work->work, handle_ksmbd_work);