scsi: lpfc: Add cleanup of nvmels_wq after HBA reset

An HBA reset request that is executed while there are outstanding NVME-LS
commands can delay completion of the reset process.  Fix by introducing a
new routine, lpfc_nvmels_flush_cmd(), that walks the phba->nvmels_wq list
and cancels outstanding submitted NVME-LS requests, speeding up the HBA
reset process.

Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20241031223219.152342-7-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit eb038363d8 (parent 98f8d35880)
Author: Justin Tee <justin.tee@broadcom.com>
Date:   2024-10-31 15:32:14 -07:00
Committed by: Martin K. Petersen

3 changed files with 51 additions and 2 deletions

drivers/scsi/lpfc/lpfc_crtn.h

@@ -660,6 +660,7 @@ void lpfc_wqe_cmd_template(void);
 void lpfc_nvmet_cmd_template(void);
 void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			   uint32_t stat, uint32_t param);
+void lpfc_nvmels_flush_cmd(struct lpfc_hba *phba);
 extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
 extern int lpfc_no_hba_reset_cnt;

drivers/scsi/lpfc/lpfc_init.c

@@ -1943,6 +1943,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
 	lpfc_offline_prep(phba, mbx_action);
 	lpfc_sli_flush_io_rings(phba);
+	lpfc_nvmels_flush_cmd(phba);
 	lpfc_offline(phba);
 
 	/* release interrupt for possible resource change */
 	lpfc_sli4_disable_intr(phba);

drivers/scsi/lpfc/lpfc_nvme.c

@@ -2231,6 +2231,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_sli4_hdw_queue *qp;
 	int abts_scsi, abts_nvme;
+	u16 nvmels_cnt;
 
 	/* Host transport has to clean up and confirm requiring an indefinite
 	 * wait. Print a message if a 10 second wait expires and renew the
@@ -2243,6 +2244,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
 			pending = 0;
 			abts_scsi = 0;
 			abts_nvme = 0;
+			nvmels_cnt = 0;
 			for (i = 0; i < phba->cfg_hdw_queue; i++) {
 				qp = &phba->sli4_hba.hdwq[i];
 				if (!vport->localport || !qp || !qp->io_wq)
@@ -2255,6 +2257,11 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
 				abts_scsi += qp->abts_scsi_io_bufs;
 				abts_nvme += qp->abts_nvme_io_bufs;
 			}
+			if (phba->sli4_hba.nvmels_wq) {
+				pring = phba->sli4_hba.nvmels_wq->pring;
+				if (pring)
+					nvmels_cnt = pring->txcmplq_cnt;
+			}
 			if (!vport->localport ||
 			    test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
 			    phba->link_state == LPFC_HBA_ERROR ||
@@ -2263,10 +2270,10 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6176 Lport x%px Localport x%px wait "
-					 "timed out. Pending %d [%d:%d]. "
+					 "timed out. Pending %d [%d:%d:%d]. "
					 "Renewing.\n",
					 lport, vport->localport, pending,
-					 abts_scsi, abts_nvme);
+					 abts_scsi, abts_nvme, nvmels_cnt);
 			continue;
 		}
 		break;
@@ -2841,3 +2848,43 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 		(pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
 #endif
 }
+
+/**
+ * lpfc_nvmels_flush_cmd - Clean up outstanding nvmels commands for a port
+ * @phba: Pointer to HBA context object.
+ *
+ **/
+void
+lpfc_nvmels_flush_cmd(struct lpfc_hba *phba)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	LIST_HEAD(cancel_list);
+	struct lpfc_sli_ring *pring = NULL;
+	struct lpfc_iocbq *piocb, *tmp_iocb;
+	unsigned long iflags;
+
+	if (phba->sli4_hba.nvmels_wq)
+		pring = phba->sli4_hba.nvmels_wq->pring;
+
+	if (unlikely(!pring))
+		return;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock(&pring->ring_lock);
+	list_splice_init(&pring->txq, &cancel_list);
+	pring->txq_cnt = 0;
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+		if (piocb->cmd_flag & LPFC_IO_NVME_LS) {
+			list_move_tail(&piocb->list, &cancel_list);
+			pring->txcmplq_cnt--;
+			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
+		}
+	}
+	spin_unlock(&pring->ring_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	if (!list_empty(&cancel_list))
+		lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
+				      IOERR_SLI_DOWN);
+#endif
+}
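
To make the pattern easier to see outside the driver, here is a small, self-contained userspace sketch of the same splice-and-cancel idea used by the new lpfc_nvmels_flush_cmd() above. It is illustrative only: struct fake_cmd, flush_ls_cmds(), FLAG_NVME_LS and the minimal list helpers are made-up stand-ins for struct lpfc_iocbq, LPFC_IO_NVME_LS and the kernel list API, and the real driver's ring locking and iocb completion handling are omitted.

/* Userspace-only sketch of the splice-and-cancel pattern used by
 * lpfc_nvmels_flush_cmd() above; fake_cmd, flush_ls_cmds(), FLAG_NVME_LS
 * and these list helpers are made-up stand-ins, not lpfc/kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdbool.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *h) { h->prev = h->next = h; }
static bool list_empty(const struct list_node *h) { return h->next == h; }

static void list_del(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Move every entry of @src to the tail of @dst, leaving @src empty. */
static void list_splice_tail_init(struct list_node *src, struct list_node *dst)
{
	while (!list_empty(src)) {
		struct list_node *n = src->next;
		list_del(n);
		list_add_tail(n, dst);
	}
}

#define FLAG_NVME_LS 0x1	/* stand-in for LPFC_IO_NVME_LS */

struct fake_cmd {
	struct list_node list;
	int tag;
	unsigned int flags;
};

#define cmd_of(node) \
	((struct fake_cmd *)((char *)(node) - offsetof(struct fake_cmd, list)))

/* Splice the whole pending queue (txq), move only LS-flagged entries off
 * the in-flight queue (txcmplq), then fail everything on the cancel list.
 */
static void flush_ls_cmds(struct list_node *txq, struct list_node *txcmplq)
{
	struct list_node cancel_list, *n, *tmp;

	list_init(&cancel_list);
	list_splice_tail_init(txq, &cancel_list);
	for (n = txcmplq->next; n != txcmplq; n = tmp) {
		tmp = n->next;		/* safe against removal */
		if (cmd_of(n)->flags & FLAG_NVME_LS) {
			list_del(n);
			list_add_tail(n, &cancel_list);
		}
	}
	while (!list_empty(&cancel_list)) {	/* "complete" with an error */
		n = cancel_list.next;
		list_del(n);
		printf("cancelled cmd %d\n", cmd_of(n)->tag);
		free(cmd_of(n));
	}
}

int main(void)
{
	struct list_node txq, txcmplq;
	int i;

	list_init(&txq);
	list_init(&txcmplq);
	for (i = 0; i < 4; i++) {
		struct fake_cmd *c = calloc(1, sizeof(*c));
		c->tag = i;
		c->flags = (i & 1) ? FLAG_NVME_LS : 0;
		list_add_tail(&c->list, i < 2 ? &txq : &txcmplq);
	}
	/* cmds 0, 1 (queued) and cmd 3 (in flight, LS) are cancelled */
	flush_ls_cmds(&txq, &txcmplq);
	return 0;
}

In the driver, the final step is lpfc_sli_cancel_iocbs(), which completes each cancelled request with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN (as seen in the diff above), so the NVMe-FC transport sees the outstanding LS commands fail quickly instead of holding up the HBA reset.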