2022-04-19 15:58:09 -07:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
|
|
|
|
|
|
#ifndef _UFSHCD_PRIV_H_
|
|
|
|
#define _UFSHCD_PRIV_H_
|
|
|
|
|
|
|
|
#include <linux/pm_runtime.h>
|
2022-05-11 14:25:52 -07:00
|
|
|
#include <ufs/ufshcd.h>
|
2022-04-19 15:58:09 -07:00
|
|
|
|
|
|
|
static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
return !hba->shutting_down;
|
|
|
|
}
|
|
|
|
|
|
|
|
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
|
|
|
|
|
|
|
|
static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
|
|
|
|
struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
|
|
|
|
return hba->dev_info.wb_dedicated_lu;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-08-04 16:54:44 +09:00
|
|
|
static inline bool ufshcd_is_wb_buf_flush_allowed(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
return ufshcd_is_wb_allowed(hba) &&
|
|
|
|
!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL);
|
|
|
|
}
|
|
|
|
|
2022-04-19 15:58:09 -07:00
|
|
|
#ifdef CONFIG_SCSI_UFS_HWMON
/* hwmon hooks; implemented elsewhere when CONFIG_SCSI_UFS_HWMON is set. */
void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask);
void ufs_hwmon_remove(struct ufs_hba *hba);
void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask);
#else
/* No-op stubs so callers need not wrap hwmon calls in #ifdefs. */
static inline void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) {}
static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
#endif
|
|
|
|
|
2022-09-21 14:58:05 +03:00
|
|
|
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
|
|
|
|
enum query_opcode opcode,
|
|
|
|
enum desc_idn idn, u8 index,
|
|
|
|
u8 selector,
|
|
|
|
u8 *desc_buf, int *buf_len);
|
2022-04-19 15:58:09 -07:00
|
|
|
int ufshcd_read_desc_param(struct ufs_hba *hba,
|
|
|
|
enum desc_idn desc_id,
|
|
|
|
int desc_index,
|
|
|
|
u8 param_offset,
|
|
|
|
u8 *param_read_buf,
|
|
|
|
u8 param_size);
|
|
|
|
int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
|
|
|
|
enum attr_idn idn, u8 index, u8 selector,
|
|
|
|
u32 *attr_val);
|
|
|
|
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
|
|
|
|
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
|
|
|
|
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
|
|
|
|
enum flag_idn idn, u8 index, bool *flag_res);
|
|
|
|
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
|
2023-01-13 12:48:49 -08:00
|
|
|
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
|
|
|
|
struct cq_entry *cqe);
|
2023-01-13 12:48:41 -08:00
|
|
|
int ufshcd_mcq_init(struct ufs_hba *hba);
|
2024-07-08 14:16:05 -07:00
|
|
|
void ufshcd_mcq_disable(struct ufs_hba *hba);
|
2023-01-13 12:48:43 -08:00
|
|
|
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
|
2023-01-13 12:48:44 -08:00
|
|
|
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
|
2023-01-13 12:48:48 -08:00
|
|
|
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
|
|
|
|
struct request *req);
|
2023-05-29 15:12:26 -07:00
|
|
|
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
|
|
|
|
struct ufs_hw_queue *hwq);
|
2023-05-29 15:12:23 -07:00
|
|
|
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
|
2023-05-29 15:12:22 -07:00
|
|
|
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
|
2023-05-29 15:12:24 -07:00
|
|
|
int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
|
|
|
|
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
|
|
|
|
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
|
|
|
|
struct ufshcd_lrb *lrbp);
|
2022-04-19 15:58:09 -07:00
|
|
|
|
|
|
|
#define SD_ASCII_STD true
|
|
|
|
#define SD_RAW false
|
|
|
|
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
|
|
|
|
u8 **buf, bool ascii);
|
|
|
|
|
|
|
|
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
|
scsi: ufs: core: Add ufshcd_send_bsg_uic_cmd() for UFS BSG
User layer applications can send UIC GET/SET commands via the BSG
framework, and if the user layer application sends a UIC SET command to the
PA_PWRMODE attribute, a power mode change shall be initiated in UniPro and
two interrupts shall be triggered if the power mode is successfully
changed, i.e., UIC Command Completion interrupt and UIC Power Mode
interrupt.
The current UFS BSG code calls ufshcd_send_uic_cmd() directly, with which
the second interrupt, i.e., UIC Power Mode interrupt, shall be treated as
unhandled interrupt. In addition, after the UIC command is completed, user
layer application has to poll UniPro and/or M-PHY state machine to confirm
the power mode change is finished.
Add a new wrapper function ufshcd_send_bsg_uic_cmd() and call it from
ufs_bsg_request() so that if a UIC SET command is targeting the PA_PWRMODE
attribute it can be redirected to ufshcd_uic_pwr_ctrl().
Fixes: e77044c5a842 ("scsi: ufs-bsg: Add support for uic commands in ufs_bsg_request()")
Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Ziqi Chen <quic_ziqichen@quicinc.com>
Link: https://lore.kernel.org/r/20241119095613.121385-1-quic_ziqichen@quicinc.com
Reviewed-by: Bean Huo <beanhuo@micron.com>
Reviewed-by: Avri Altman <avri.altman@wdc.com>
Reviewed-by: Peter Wang <peter.wang@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2024-11-19 17:56:04 +08:00
|
|
|
int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
|
2022-04-19 15:58:09 -07:00
|
|
|
|
|
|
|
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
|
|
|
|
struct utp_upiu_req *req_upiu,
|
|
|
|
struct utp_upiu_req *rsp_upiu,
|
2023-07-27 12:41:19 -07:00
|
|
|
enum upiu_request_transaction msgcode,
|
2022-04-19 15:58:09 -07:00
|
|
|
u8 *desc_buff, int *buff_len,
|
|
|
|
enum query_opcode desc_op);
|
|
|
|
|
|
|
|
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
|
2025-03-28 14:46:13 -07:00
|
|
|
int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
|
2022-04-19 15:58:09 -07:00
|
|
|
|
|
|
|
/* Wrapper functions for safely calling variant operations */
|
|
|
|
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops)
|
|
|
|
return hba->vops->name;
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ufshcd_vops_exit(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->exit)
|
|
|
|
return hba->vops->exit(hba);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->get_ufs_hci_version)
|
|
|
|
return hba->vops->get_ufs_hci_version(hba);
|
|
|
|
|
|
|
|
return ufshcd_readl(hba, REG_UFS_VERSION);
|
|
|
|
}
|
|
|
|
|
2025-02-13 16:00:01 +08:00
|
|
|
/* Notify the variant of a clock-scaling event; 0 when no hook exists. */
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, bool up,
					       unsigned long target_freq,
					       enum ufs_notify_change_status status)
{
	if (!hba->vops || !hba->vops->clk_scale_notify)
		return 0;

	return hba->vops->clk_scale_notify(hba, up, target_freq, status);
}
|
|
|
|
|
|
|
|
static inline void ufshcd_vops_event_notify(struct ufs_hba *hba,
|
|
|
|
enum ufs_event_type evt,
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->event_notify)
|
|
|
|
hba->vops->event_notify(hba, evt, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Variant clock setup hook; 0 (success) when no hook is provided. */
static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
					   enum ufs_notify_change_status status)
{
	if (!hba->vops || !hba->vops->setup_clocks)
		return 0;

	return hba->vops->setup_clocks(hba, on, status);
}
|
|
|
|
|
|
|
|
static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
|
|
|
|
bool status)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->hce_enable_notify)
|
|
|
|
return hba->vops->hce_enable_notify(hba, status);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
|
|
|
|
bool status)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->link_startup_notify)
|
|
|
|
return hba->vops->link_startup_notify(hba, status);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
|
2025-02-12 13:38:02 -08:00
|
|
|
enum ufs_notify_change_status status,
|
|
|
|
const struct ufs_pa_layer_attr *dev_max_params,
|
|
|
|
struct ufs_pa_layer_attr *dev_req_params)
|
2022-04-19 15:58:09 -07:00
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->pwr_change_notify)
|
|
|
|
return hba->vops->pwr_change_notify(hba, status,
|
|
|
|
dev_max_params, dev_req_params);
|
|
|
|
|
|
|
|
return -ENOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Let the variant prepare a task-management request, if it has a hook. */
static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
					       int tag, u8 tm_function)
{
	/*
	 * Plain call: the previous "return <void expression>;" form is a
	 * GNU extension, not ISO C. Behavior is unchanged.
	 */
	if (hba->vops && hba->vops->setup_task_mgmt)
		hba->vops->setup_task_mgmt(hba, tag, tm_function);
}
|
|
|
|
|
|
|
|
static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
|
|
|
|
enum uic_cmd_dme cmd,
|
|
|
|
enum ufs_notify_change_status status)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->hibern8_notify)
|
|
|
|
return hba->vops->hibern8_notify(hba, cmd, status);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->apply_dev_quirks)
|
|
|
|
return hba->vops->apply_dev_quirks(hba);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->fixup_dev_quirks)
|
|
|
|
hba->vops->fixup_dev_quirks(hba);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op,
|
|
|
|
enum ufs_notify_change_status status)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->suspend)
|
|
|
|
return hba->vops->suspend(hba, op, status);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->resume)
|
|
|
|
return hba->vops->resume(hba, op);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->dbg_register_dump)
|
|
|
|
hba->vops->dbg_register_dump(hba);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->device_reset)
|
|
|
|
return hba->vops->device_reset(hba);
|
|
|
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
|
|
|
|
struct devfreq_dev_profile *p,
|
|
|
|
struct devfreq_simple_ondemand_data *data)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->config_scaling_param)
|
|
|
|
hba->vops->config_scaling_param(hba, p, data);
|
|
|
|
}
|
|
|
|
|
2023-01-13 12:48:42 -08:00
|
|
|
static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->mcq_config_resource)
|
|
|
|
return hba->vops->mcq_config_resource(hba);
|
|
|
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2023-01-13 12:48:45 -08:00
|
|
|
static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->op_runtime_config)
|
|
|
|
return hba->vops->op_runtime_config(hba);
|
|
|
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2023-01-13 12:48:50 -08:00
|
|
|
static inline int ufshcd_vops_get_outstanding_cqs(struct ufs_hba *hba,
|
|
|
|
unsigned long *ocqs)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->get_outstanding_cqs)
|
|
|
|
return hba->vops->get_outstanding_cqs(hba, ocqs);
|
|
|
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2022-12-14 19:06:20 -08:00
|
|
|
static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->config_esi)
|
|
|
|
return hba->vops->config_esi(hba);
|
|
|
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2025-02-13 16:00:03 +08:00
|
|
|
static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
|
|
|
|
{
|
|
|
|
if (hba->vops && hba->vops->freq_to_gear_speed)
|
|
|
|
return hba->vops->freq_to_gear_speed(hba, freq);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-06-23 12:24:32 +02:00
|
|
|
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
|
2022-04-19 15:58:09 -07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
|
|
|
|
* @scsi_lun: scsi LUN id
|
|
|
|
*
|
2023-07-27 12:41:13 -07:00
|
|
|
* Return: UPIU LUN id
|
2022-04-19 15:58:09 -07:00
|
|
|
*/
|
|
|
|
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
|
|
|
|
{
|
|
|
|
if (scsi_is_wlun(scsi_lun))
|
|
|
|
return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
|
|
|
|
| UFS_UPIU_WLUN_ID;
|
|
|
|
else
|
|
|
|
return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
|
|
|
|
}
|
|
|
|
|
|
|
|
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
|
|
|
|
int ufshcd_write_ee_control(struct ufs_hba *hba);
|
2022-06-23 12:24:32 +02:00
|
|
|
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
|
|
|
|
const u16 *other_mask, u16 set, u16 clr);
|
2022-04-19 15:58:09 -07:00
|
|
|
|
|
|
|
/* Update the driver-owned exception-event mask bits. */
static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
					    u16 set, u16 clr)
{
	u16 *mask = &hba->ee_drv_mask;
	u16 *other = &hba->ee_usr_mask;

	return ufshcd_update_ee_control(hba, mask, other, set, clr);
}
|
|
|
|
|
|
|
|
/* Update the user-owned exception-event mask bits. */
static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
					    u16 set, u16 clr)
{
	u16 *mask = &hba->ee_usr_mask;
	u16 *other = &hba->ee_drv_mask;

	return ufshcd_update_ee_control(hba, mask, other, set, clr);
}
|
|
|
|
|
|
|
|
static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev);
|
|
|
|
}
|
|
|
|
|
2024-07-15 14:38:31 +08:00
|
|
|
static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev);
|
|
|
|
}
|
|
|
|
|
2022-04-19 15:58:09 -07:00
|
|
|
static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ufshcd_rpm_get_noresume(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
pm_runtime_get_noresume(&hba->ufs_device_wlun->sdev_gendev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int ufshcd_rpm_resume(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
return pm_runtime_resume(&hba->ufs_device_wlun->sdev_gendev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int ufshcd_rpm_put(struct ufs_hba *hba)
|
|
|
|
{
|
|
|
|
return pm_runtime_put(&hba->ufs_device_wlun->sdev_gendev);
|
|
|
|
}
|
|
|
|
|
2022-04-19 15:58:11 -07:00
|
|
|
/**
 * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
 * @dev_info: pointer of instance of struct ufs_dev_info
 * @lun: LU number to check
 *
 * Return: true if the lun has a matching unit descriptor, false otherwise
 */
static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8 lun)
{
	/* max_lu_supported is populated during device init; 0 means "not yet". */
	if (!dev_info || !dev_info->max_lu_supported) {
		pr_err("Max General LU supported by UFS isn't initialized\n");
		return false;
	}
	/* The RPMB well-known LUN is always valid in addition to general LUs. */
	return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
|
|
|
|
|
2023-01-13 12:48:47 -08:00
|
|
|
static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
|
2023-03-29 13:13:02 +03:00
|
|
|
__must_hold(&q->sq_lock)
|
2023-01-13 12:48:47 -08:00
|
|
|
{
|
|
|
|
u32 val;
|
|
|
|
|
2023-06-01 20:46:14 +08:00
|
|
|
q->sq_tail_slot++;
|
|
|
|
if (q->sq_tail_slot == q->max_entries)
|
|
|
|
q->sq_tail_slot = 0;
|
2023-01-13 12:48:47 -08:00
|
|
|
val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc);
|
|
|
|
writel(val, q->mcq_sq_tail);
|
|
|
|
}
|
|
|
|
|
2023-01-13 12:48:50 -08:00
|
|
|
static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
|
|
|
|
{
|
|
|
|
u32 val = readl(q->mcq_cq_tail);
|
|
|
|
|
|
|
|
q->cq_tail_slot = val / sizeof(struct cq_entry);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
|
|
|
|
{
|
|
|
|
return q->cq_head_slot == q->cq_tail_slot;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
|
|
|
|
{
|
|
|
|
q->cq_head_slot++;
|
|
|
|
if (q->cq_head_slot == q->max_entries)
|
|
|
|
q->cq_head_slot = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
|
|
|
|
{
|
|
|
|
writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
|
|
|
|
{
|
|
|
|
struct cq_entry *cqe = q->cqe_base_addr;
|
|
|
|
|
|
|
|
return cqe + q->cq_head_slot;
|
|
|
|
}
|
2023-05-29 15:12:22 -07:00
|
|
|
|
|
|
|
static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
|
|
|
|
{
|
|
|
|
u32 val = readl(q->mcq_sq_head);
|
|
|
|
|
|
|
|
return val / sizeof(struct utp_transfer_req_desc);
|
|
|
|
}
|
|
|
|
|
2022-04-19 15:58:09 -07:00
|
|
|
#endif /* _UFSHCD_PRIV_H_ */
|