Merge branch '5.11/scsi-fixes' into 5.12/scsi-queue

The UFS core has received a substantial rework this cycle. This in
turn has caused a merge conflict in linux-next. Merge 5.11/scsi-fixes
into 5.12/scsi-queue and resolve the conflict.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 4d82e9db42
Author: Martin K. Petersen <martin.petersen@oracle.com>
Date:   2021-01-26 21:36:54 -05:00

11 changed files with 92 additions and 44 deletions

View file

@@ -916,21 +916,25 @@ Date:		September 2014
 Contact:	Subhash Jadavani <subhashj@codeaurora.org>
 Description:	This entry could be used to set or show the UFS device
 		runtime power management level. The current driver
-		implementation supports 6 levels with next target states:
+		implementation supports 7 levels with next target states:
 
 		==  ====================================================
-		0   an UFS device will stay active, an UIC link will
+		0   UFS device will stay active, UIC link will
 		    stay active
-		1   an UFS device will stay active, an UIC link will
+		1   UFS device will stay active, UIC link will
 		    hibernate
-		2   an UFS device will moved to sleep, an UIC link will
+		2   UFS device will be moved to sleep, UIC link will
 		    stay active
-		3   an UFS device will moved to sleep, an UIC link will
+		3   UFS device will be moved to sleep, UIC link will
 		    hibernate
-		4   an UFS device will be powered off, an UIC link will
+		4   UFS device will be powered off, UIC link will
 		    hibernate
-		5   an UFS device will be powered off, an UIC link will
+		5   UFS device will be powered off, UIC link will
 		    be powered off
+		6   UFS device will be moved to deep sleep, UIC link
+		    will be powered off. Note, deep sleep might not be
+		    supported in which case this value will not be
+		    accepted
 		==  ====================================================
 
 What:		/sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
@@ -954,21 +958,25 @@ Date:		September 2014
 Contact:	Subhash Jadavani <subhashj@codeaurora.org>
 Description:	This entry could be used to set or show the UFS device
 		system power management level. The current driver
-		implementation supports 6 levels with next target states:
+		implementation supports 7 levels with next target states:
 
 		==  ====================================================
-		0   an UFS device will stay active, an UIC link will
+		0   UFS device will stay active, UIC link will
 		    stay active
-		1   an UFS device will stay active, an UIC link will
+		1   UFS device will stay active, UIC link will
 		    hibernate
-		2   an UFS device will moved to sleep, an UIC link will
+		2   UFS device will be moved to sleep, UIC link will
 		    stay active
-		3   an UFS device will moved to sleep, an UIC link will
+		3   UFS device will be moved to sleep, UIC link will
 		    hibernate
-		4   an UFS device will be powered off, an UIC link will
+		4   UFS device will be powered off, UIC link will
 		    hibernate
-		5   an UFS device will be powered off, an UIC link will
+		5   UFS device will be powered off, UIC link will
 		    be powered off
+		6   UFS device will be moved to deep sleep, UIC link
+		    will be powered off. Note, deep sleep might not be
+		    supported in which case this value will not be
+		    accepted
 		==  ====================================================
 
 What:		/sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state
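As an aside, the sketch below shows how the rpm_lvl attribute documented above might be driven from userspace. It is a minimal example, not part of the patch: the concrete device path stands in for the "*" wildcard in the ABI entries and is therefore an assumption, and level 6 (DeepSleep) may be rejected on hardware that does not support it.

/*
 * Minimal userspace sketch (not from the patch above): read and set the
 * rpm_lvl attribute described in the ABI text. The device name below is
 * a hypothetical placeholder for the "*" wildcard in the ABI entry.
 */
#include <stdio.h>
#include <stdlib.h>

#define RPM_LVL_PATH "/sys/bus/platform/drivers/ufshcd/1d84000.ufshc/rpm_lvl"

static int read_level(const char *path)
{
	char buf[16];
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return atoi(buf);
}

static int write_level(const char *path, int lvl)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	/* Valid values are 0-6 per the table above; 6 may be rejected. */
	fprintf(f, "%d\n", lvl);
	return fclose(f);
}

int main(void)
{
	int lvl = read_level(RPM_LVL_PATH);

	if (lvl < 0) {
		perror("read rpm_lvl");
		return 1;
	}
	printf("current rpm_lvl: %d\n", lvl);

	/* Example: request device sleep with link hibernate (level 3). */
	if (write_level(RPM_LVL_PATH, 3) != 0)
		perror("write rpm_lvl");
	return 0;
}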

View file

@@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
 	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
 	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
 		pr_err("error in devcmd2 init");
-		return -ENODEV;
+		err = -ENODEV;
+		goto err_free_wq;
 	}
 
 	/*
@@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
 	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
 				       DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
 	if (err)
-		goto err_free_wq;
+		goto err_disable_wq;
 
 	vdev->devcmd2->result =
 		(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
@@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
 err_free_desc_ring:
 	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
 	vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
 	vnic_wq_free(&vdev->devcmd2->wq);
 err_free_devcmd2:
 	kfree(vdev->devcmd2);

View file

@@ -1930,7 +1930,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
 	}
 
-	vfc_cmd->correlation = cpu_to_be64(evt);
+	vfc_cmd->correlation = cpu_to_be64((u64)evt);
 
 	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
 		return ibmvfc_send_event(evt, vhost, 0);
@@ -2707,7 +2707,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 	tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
 	evt->sync_iu = &rsp_iu;
-	tmf->correlation = cpu_to_be64(evt);
+	tmf->correlation = cpu_to_be64((u64)evt);
 
 	init_completion(&evt->comp);
 	rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
@@ -3296,8 +3296,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
 	unsigned long flags = 0;
 
 	spin_lock_irqsave(shost->host_lock, flags);
-	if (sdev->type == TYPE_DISK)
+	if (sdev->type == TYPE_DISK) {
 		sdev->allow_restart = 1;
+		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 	return 0;
 }

View file

@@ -1623,8 +1623,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 		rc = fc_exch_done_locked(ep);
 		WARN_ON(fc_seq_exch(sp) != ep);
 		spin_unlock_bh(&ep->ex_lock);
-		if (!rc)
+		if (!rc) {
 			fc_exch_delete(ep);
+		} else {
+			FC_EXCH_DBG(ep, "ep is completed already,"
+					"hence skip calling the resp\n");
+			goto skip_resp;
+		}
 	}
 
 	/*
@@ -1643,6 +1648,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	if (!fc_invoke_resp(ep, sp, fp))
 		fc_frame_free(fp);
+skip_resp:
 	fc_exch_release(ep);
 	return;
 rel:
@@ -1899,10 +1905,16 @@ static void fc_exch_reset(struct fc_exch *ep)
 	fc_exch_hold(ep);
 
-	if (!rc)
+	if (!rc) {
 		fc_exch_delete(ep);
+	} else {
+		FC_EXCH_DBG(ep, "ep is completed already,"
+				"hence skip calling the resp\n");
+		goto skip_resp;
+	}
 
 	fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
 	fc_seq_set_resp(sp, NULL, ep->arg);
 	fc_exch_release(ep);
 }

View file

@@ -8244,11 +8244,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 			goto out;
 		}
 
+		/* always store 64 bits regardless of addressing */
 		sense_ptr = (void *)cmd->frame + ioc->sense_off;
-		if (instance->consistent_mask_64bit)
-			put_unaligned_le64(sense_handle, sense_ptr);
-		else
-			put_unaligned_le32(sense_handle, sense_ptr);
+		put_unaligned_le64(sense_handle, sense_ptr);
 	}
 
 	/*

View file

@@ -42,7 +42,7 @@ MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
 int ql2xenforce_iocb_limit = 1;
 module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql2xenforce_iocb_limit,
-		 "Enforce IOCB throttling, to avoid FW congestion. (default: 0)");
+		 "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
 
 /*
  * CT6 CTX allocation cache

View file

@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
 	res = mutex_lock_interruptible(&rport->mutex);
 	if (res)
 		goto out;
-	scsi_target_block(&shost->shost_gendev);
+	if (rport->state != SRP_RPORT_FAIL_FAST)
+		/*
+		 * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+		 * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+		 * later is ok though, scsi_internal_device_unblock_nowait()
+		 * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+		 */
+		scsi_target_block(&shost->shost_gendev);
 	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
 	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
 		 dev_name(&shost->shost_gendev), rport->state, res);

View file

@@ -72,6 +72,7 @@ config SCSI_UFS_DWC_TC_PCI
 config SCSI_UFSHCD_PLATFORM
 	tristate "Platform bus based UFS Controller support"
 	depends on SCSI_UFSHCD
+	depends on HAS_IOMEM
 	help
 	  This selects the UFS host controller support. Select this if
 	  you have an UFS controller on Platform bus.

View file

@@ -4030,6 +4030,8 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
 	if (ret)
 		dev_err(hba->dev, "%s: link recovery failed, err %d",
 			__func__, ret);
+	else
+		ufshcd_clear_ua_wluns(hba);
 
 	return ret;
 }
@@ -5029,7 +5031,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 		break;
 	} /* end of switch */
 
-	if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+	if ((host_byte(result) != DID_OK) &&
+	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
 		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
 	return result;
 }
@@ -6037,6 +6040,9 @@ skip_err_handling:
 	ufshcd_scsi_unblock_requests(hba);
 	ufshcd_err_handling_unprepare(hba);
 	up(&hba->host_sem);
+
+	if (!err && needs_reset)
+		ufshcd_clear_ua_wluns(hba);
 }
 
 /**
@@ -6330,9 +6336,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	}
 
-	if (enabled_intr_status && retval == IRQ_NONE) {
-		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
-					__func__, intr_status);
+	if (enabled_intr_status && retval == IRQ_NONE &&
+	    !ufshcd_eh_in_progress(hba)) {
+		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+					__func__,
+					intr_status,
+					hba->ufs_stats.last_intr_status,
+					enabled_intr_status);
 		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 	}
@@ -6376,7 +6386,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	 * Even though we use wait_event() which sleeps indefinitely,
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
-	req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
 	req->end_io_data = &wait;
 	free_slot = req->tag;
 	WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -6973,14 +6986,11 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 	ufshcd_set_clk_freq(hba, true);
 
 	err = ufshcd_hba_enable(hba);
-	if (err)
-		goto out;
 
 	/* Establish the link again and restore the device */
-	err = ufshcd_probe_hba(hba, false);
 	if (!err)
-		ufshcd_clear_ua_wluns(hba);
-out:
+		err = ufshcd_probe_hba(hba, false);
+
 	if (err)
 		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
 
 	ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
@@ -7747,6 +7757,8 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
 	if (ret)
 		goto out;
 
+	ufshcd_clear_ua_wluns(hba);
+
 	/* Initialize devfreq after UFS device is detected */
 	if (ufshcd_is_clkscaling_supported(hba)) {
 		memcpy(&hba->clk_scaling.saved_pwr_info.info,
@@ -7948,8 +7960,6 @@ out:
 	if (ret) {
 		pm_runtime_put_sync(hba->dev);
 		ufshcd_hba_exit(hba);
-	} else {
-		ufshcd_clear_ua_wluns(hba);
 	}
 }
@@ -8805,6 +8815,7 @@ enable_gating:
 	hba->clk_gating.is_suspended = false;
 	hba->dev_info.b_rpm_dev_flush_capable = false;
+	ufshcd_clear_ua_wluns(hba);
 	ufshcd_release(hba);
 out:
 	if (hba->dev_info.b_rpm_dev_flush_capable) {
@@ -8915,6 +8926,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
 	}
 
+	ufshcd_clear_ua_wluns(hba);
+
 	/* Schedule clock gating in case of no access to UFS device yet */
 	ufshcd_release(hba);

View file

@@ -896,7 +896,7 @@ int iscsit_setup_np(
 	else
 		len = sizeof(struct sockaddr_in);
 	/*
-	 * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
+	 * Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY.
 	 */
 	if (np->np_network_transport == ISCSI_TCP)
 		tcp_sock_set_nodelay(sock->sk);

View file

@@ -562,8 +562,6 @@ tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 
 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
 {
-	if (tcmu_cmd->se_cmd)
-		tcmu_cmd->se_cmd->priv = NULL;
 	kfree(tcmu_cmd->dbi);
 	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
 }
@@ -1174,11 +1172,12 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
 	mutex_lock(&udev->cmdr_lock);
-	se_cmd->priv = tcmu_cmd;
 	if (!(se_cmd->transport_state & CMD_T_ABORTED))
 		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
 	if (ret < 0)
 		tcmu_free_cmd(tcmu_cmd);
+	else
+		se_cmd->priv = tcmu_cmd;
 	mutex_unlock(&udev->cmdr_lock);
 	return scsi_ret;
 }
@@ -1241,6 +1240,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
 
 		list_del_init(&cmd->queue_entry);
 		tcmu_free_cmd(cmd);
+		se_cmd->priv = NULL;
 		target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
 		unqueued = true;
 	}
@@ -1332,6 +1332,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 	}
 
 done:
+	se_cmd->priv = NULL;
 	if (read_len_valid) {
 		pr_debug("read_len = %d\n", read_len);
 		target_complete_cmd_with_length(cmd->se_cmd,
@@ -1478,6 +1479,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
 	se_cmd = cmd->se_cmd;
 	tcmu_free_cmd(cmd);
+	se_cmd->priv = NULL;
 	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
 }
@@ -1592,6 +1594,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
 			 * removed then LIO core will do the right thing and
 			 * fail the retry.
 			 */
+			tcmu_cmd->se_cmd->priv = NULL;
 			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
 			tcmu_free_cmd(tcmu_cmd);
 			continue;
@@ -1605,6 +1608,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
 			 * Ignore scsi_ret for now. target_complete_cmd
 			 * drops it.
 			 */
+			tcmu_cmd->se_cmd->priv = NULL;
 			target_complete_cmd(tcmu_cmd->se_cmd,
 					    SAM_STAT_CHECK_CONDITION);
 			tcmu_free_cmd(tcmu_cmd);
@@ -2212,6 +2216,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
 			WARN_ON(!cmd->se_cmd);
 			list_del_init(&cmd->queue_entry);
+			cmd->se_cmd->priv = NULL;
 			if (err_level == 1) {
 				/*
 				 * Userspace was not able to start the