Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-05-24 10:39:52 +00:00)
nvme-fcloop: fix inconsistent lock state warnings
With extra lock debugging enabled, inconsistent lock state warnings are reported because tfcp_req->reqlock is taken without disabling interrupts, while some calling sequences acquire the same lock in softirq context. Change the lock acquire/release calls to disable/re-enable local interrupts.

Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent c9b3007fec
commit c38dbbfab1

1 changed file with 21 additions and 21 deletions
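The warnings come from the kernel's lock validator (lockdep): tfcp_req->reqlock was taken with plain spin_lock() in process/workqueue context while other code paths take the same lock from softirq context, so lockdep flags an inconsistent lock state, and a softirq arriving while the lock is held could deadlock on it. The sketch below shows the general pattern the fix applies; it is illustrative only, and the names (demo_req, demo_init, demo_update) are not taken from fcloop.c.

#include <linux/spinlock.h>

/* Illustrative types and names only; not from fcloop.c. */
struct demo_req {
        spinlock_t lock;        /* also acquired from softirq context elsewhere */
        int state;
};

static void demo_init(struct demo_req *req)
{
        spin_lock_init(&req->lock);
        req->state = 0;
}

static void demo_update(struct demo_req *req)
{
        /*
         * spin_lock_irq() disables local interrupts (and therefore
         * softirqs) for the critical section, so a softirq-context
         * user of the same lock cannot interrupt the holder on this
         * CPU. A plain spin_lock() here, combined with softirq-context
         * users of the same lock, is what produces the "inconsistent
         * lock state" report when lock debugging is enabled.
         */
        spin_lock_irq(&req->lock);
        req->state = 1;
        spin_unlock_irq(&req->lock);    /* re-enables local interrupts */
}

Presumably spin_lock_irq()/spin_unlock_irq() (rather than spin_lock_irqsave()) is sufficient in these paths because they run in process context with interrupts enabled, so unconditionally re-enabling interrupts on unlock is safe.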
drivers/nvme/target/fcloop.c

@@ -434,7 +434,7 @@ fcloop_fcp_recv_work(struct work_struct *work)
         int ret = 0;
         bool aborted = false;
 
-        spin_lock(&tfcp_req->reqlock);
+        spin_lock_irq(&tfcp_req->reqlock);
         switch (tfcp_req->inistate) {
         case INI_IO_START:
                 tfcp_req->inistate = INI_IO_ACTIVE;
@@ -443,11 +443,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
                 aborted = true;
                 break;
         default:
-                spin_unlock(&tfcp_req->reqlock);
+                spin_unlock_irq(&tfcp_req->reqlock);
                 WARN_ON(1);
                 return;
         }
-        spin_unlock(&tfcp_req->reqlock);
+        spin_unlock_irq(&tfcp_req->reqlock);
 
         if (unlikely(aborted))
                 ret = -ECANCELED;
@@ -469,7 +469,7 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
         struct nvmefc_fcp_req *fcpreq;
         bool completed = false;
 
-        spin_lock(&tfcp_req->reqlock);
+        spin_lock_irq(&tfcp_req->reqlock);
         fcpreq = tfcp_req->fcpreq;
         switch (tfcp_req->inistate) {
         case INI_IO_ABORTED:
@@ -478,11 +478,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
                 completed = true;
                 break;
         default:
-                spin_unlock(&tfcp_req->reqlock);
+                spin_unlock_irq(&tfcp_req->reqlock);
                 WARN_ON(1);
                 return;
         }
-        spin_unlock(&tfcp_req->reqlock);
+        spin_unlock_irq(&tfcp_req->reqlock);
 
         if (unlikely(completed)) {
                 /* remove reference taken in original abort downcall */
@@ -494,9 +494,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
                 nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
                                         &tfcp_req->tgt_fcp_req);
 
-        spin_lock(&tfcp_req->reqlock);
+        spin_lock_irq(&tfcp_req->reqlock);
         tfcp_req->fcpreq = NULL;
-        spin_unlock(&tfcp_req->reqlock);
+        spin_unlock_irq(&tfcp_req->reqlock);
 
         fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
         /* call_host_done releases reference for abort downcall */
@@ -513,10 +513,10 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
                 container_of(work, struct fcloop_fcpreq, tio_done_work);
         struct nvmefc_fcp_req *fcpreq;
 
-        spin_lock(&tfcp_req->reqlock);
+        spin_lock_irq(&tfcp_req->reqlock);
         fcpreq = tfcp_req->fcpreq;
         tfcp_req->inistate = INI_IO_COMPLETED;
-        spin_unlock(&tfcp_req->reqlock);
+        spin_unlock_irq(&tfcp_req->reqlock);
 
         fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
 }
@@ -621,12 +621,12 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
         int fcp_err = 0, active, aborted;
         u8 op = tgt_fcpreq->op;
 
-        spin_lock(&tfcp_req->reqlock);
+        spin_lock_irq(&tfcp_req->reqlock);
         fcpreq = tfcp_req->fcpreq;
         active = tfcp_req->active;
         aborted = tfcp_req->aborted;
         tfcp_req->active = true;
-        spin_unlock(&tfcp_req->reqlock);
+        spin_unlock_irq(&tfcp_req->reqlock);
 
         if (unlikely(active))
                 /* illegal - call while i/o active */
@@ -634,9 +634,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 
         if (unlikely(aborted)) {
                 /* target transport has aborted i/o prior */
-                spin_lock(&tfcp_req->reqlock);
+                spin_lock_irq(&tfcp_req->reqlock);
                 tfcp_req->active = false;
-                spin_unlock(&tfcp_req->reqlock);
+                spin_unlock_irq(&tfcp_req->reqlock);
                 tgt_fcpreq->transferred_length = 0;
                 tgt_fcpreq->fcp_error = -ECANCELED;
                 tgt_fcpreq->done(tgt_fcpreq);
@@ -693,9 +693,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
                 break;
         }
 
-        spin_lock(&tfcp_req->reqlock);
+        spin_lock_irq(&tfcp_req->reqlock);
         tfcp_req->active = false;
-        spin_unlock(&tfcp_req->reqlock);
+        spin_unlock_irq(&tfcp_req->reqlock);
 
         tgt_fcpreq->transferred_length = xfrlen;
         tgt_fcpreq->fcp_error = fcp_err;
@@ -715,9 +715,9 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
          * (one doing io, other doing abort) and only kills ops posted
          * after the abort request
          */
-        spin_lock(&tfcp_req->reqlock);
+        spin_lock_irq(&tfcp_req->reqlock);
         tfcp_req->aborted = true;
-        spin_unlock(&tfcp_req->reqlock);
+        spin_unlock_irq(&tfcp_req->reqlock);
 
         tfcp_req->status = NVME_SC_INTERNAL;
 
@@ -765,7 +765,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
                 return;
 
         /* break initiator/target relationship for io */
-        spin_lock(&tfcp_req->reqlock);
+        spin_lock_irq(&tfcp_req->reqlock);
         switch (tfcp_req->inistate) {
         case INI_IO_START:
         case INI_IO_ACTIVE:
@@ -775,11 +775,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
                 abortio = false;
                 break;
         default:
-                spin_unlock(&tfcp_req->reqlock);
+                spin_unlock_irq(&tfcp_req->reqlock);
                 WARN_ON(1);
                 return;
         }
-        spin_unlock(&tfcp_req->reqlock);
+        spin_unlock_irq(&tfcp_req->reqlock);
 
         if (abortio)
                 /* leave the reference while the work item is scheduled */