2020-08-27 14:16:55 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
|
|
|
/*
|
|
|
|
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/delay.h>
|
2025-05-18 14:21:38 +03:00
|
|
|
#include <linux/iopoll.h>
|
2024-01-26 20:26:29 +02:00
|
|
|
#include <linux/phy/phy.h>
|
2020-09-09 17:49:02 -07:00
|
|
|
#include <drm/drm_print.h>
|
2020-08-27 14:16:55 -07:00
|
|
|
|
|
|
|
#include "dp_reg.h"
|
|
|
|
#include "dp_aux.h"
|
|
|
|
|
2021-05-07 14:25:05 -07:00
|
|
|
/*
 * Result of the most recent AUX transaction, as decoded from the interrupt
 * status bits by msm_dp_aux_isr() and consumed by msm_dp_aux_transfer().
 */
enum msm_dp_aux_err {
	DP_AUX_ERR_NONE,	/* transfer completed successfully */
	DP_AUX_ERR_ADDR,	/* controller reported a wrong-address error */
	DP_AUX_ERR_TOUT,	/* transaction timed out */
	DP_AUX_ERR_NACK,	/* sink NACKed the transaction */
	DP_AUX_ERR_DEFER,	/* sink deferred the transaction */
	DP_AUX_ERR_NACK_DEFER,	/* combined NACK+DEFER reported by the controller */
	DP_AUX_ERR_PHY,		/* PHY-level AUX error interrupt fired */
};
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
/*
 * Driver-private state wrapped around the DRM AUX channel.
 * Retrieve it from a struct drm_dp_aux with container_of() on msm_dp_aux.
 */
struct msm_dp_aux_private {
	struct device *dev;		/* device used for logging / PM */
	void __iomem *aux_base;		/* mapped AUX register block */

	struct phy *phy;		/* DP PHY, recalibrated on repeated failures */

	struct mutex mutex;		/* serializes transfers and init/deinit */
	struct completion comp;		/* signalled by the ISR when a transfer ends */

	enum msm_dp_aux_err aux_error_num;	/* outcome of the last transfer (set in ISR) */
	u32 retry_cnt;			/* consecutive native-transfer failures */
	bool cmd_busy;			/* a transfer is in flight; gates ISR handling */
	bool native;			/* current msg is a native AUX (not i2c) transaction */
	bool read;			/* current msg is a read */
	bool no_send_addr;		/* i2c: suppress address phase */
	bool no_send_stop;		/* i2c: suppress STOP (middle-of-transaction) */
	bool initted;			/* controller enabled via msm_dp_aux_init() */
	bool is_edp;			/* panel is eDP (always-connected; skip HPD gating) */
	bool enable_xfers;		/* external display connected; transfers allowed */
	u32 offset;			/* tracked EDID read offset for the helper */
	u32 segment;			/* tracked E-DDC segment for the helper */

	struct drm_dp_aux msm_dp_aux;	/* embedded DRM AUX channel */
};
|
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
/* Read a register in the AUX block; relaxed, reads impose no ordering here. */
static inline u32 msm_dp_read_aux(struct msm_dp_aux_private *aux, u32 offset)
{
	void __iomem *addr = aux->aux_base + offset;

	return readl_relaxed(addr);
}
|
|
|
|
|
|
|
|
static inline void msm_dp_write_aux(struct msm_dp_aux_private *aux,
				    u32 offset, u32 data)
{
	void __iomem *addr = aux->aux_base + offset;

	/*
	 * Use the ordered writel() rather than writel_relaxed() so the
	 * register write is guaranteed to land before any later operation.
	 */
	writel(data, addr);
}
|
2025-05-18 14:21:38 +03:00
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
/*
 * Clear any pending PHY-level AUX interrupts.
 *
 * The read followed by the 0x1f / 0x9f / 0 write sequence is the
 * hardware-mandated clear procedure; do not reorder or collapse it.
 */
static void msm_dp_aux_clear_hw_interrupts(struct msm_dp_aux_private *aux)
{
	/* latch the pending PHY AUX interrupt status before clearing */
	msm_dp_read_aux(aux, REG_DP_PHY_AUX_INTERRUPT_STATUS);

	msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
	msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
	msm_dp_write_aux(aux, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NOTE: resetting AUX controller will also clear any pending HPD related interrupts
|
|
|
|
*/
|
|
|
|
static void msm_dp_aux_reset(struct msm_dp_aux_private *aux)
|
|
|
|
{
|
|
|
|
u32 aux_ctrl;
|
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
|
2025-05-18 14:21:38 +03:00
|
|
|
|
|
|
|
aux_ctrl |= DP_AUX_CTRL_RESET;
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
|
2025-05-18 14:21:38 +03:00
|
|
|
usleep_range(1000, 1100); /* h/w recommended delay */
|
|
|
|
|
|
|
|
aux_ctrl &= ~DP_AUX_CTRL_RESET;
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
|
2025-05-18 14:21:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void msm_dp_aux_enable(struct msm_dp_aux_private *aux)
|
|
|
|
{
|
|
|
|
u32 aux_ctrl;
|
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
|
2025-05-18 14:21:38 +03:00
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_TIMEOUT_COUNT, 0xffff);
|
|
|
|
msm_dp_write_aux(aux, REG_DP_AUX_LIMITS, 0xffff);
|
2025-05-18 14:21:38 +03:00
|
|
|
|
|
|
|
aux_ctrl |= DP_AUX_CTRL_ENABLE;
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
|
2025-05-18 14:21:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void msm_dp_aux_disable(struct msm_dp_aux_private *aux)
|
|
|
|
{
|
|
|
|
u32 aux_ctrl;
|
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
aux_ctrl = msm_dp_read_aux(aux, REG_DP_AUX_CTRL);
|
2025-05-18 14:21:38 +03:00
|
|
|
aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_AUX_CTRL, aux_ctrl);
|
2025-05-18 14:21:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static int msm_dp_aux_wait_for_hpd_connect_state(struct msm_dp_aux_private *aux,
|
|
|
|
unsigned long wait_us)
|
|
|
|
{
|
|
|
|
u32 state;
|
|
|
|
|
|
|
|
/* poll for hpd connected status every 2ms and timeout after wait_us */
|
2025-05-18 14:21:44 +03:00
|
|
|
return readl_poll_timeout(aux->aux_base +
|
2025-05-18 14:21:38 +03:00
|
|
|
REG_DP_DP_HPD_INT_STATUS,
|
|
|
|
state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
|
|
|
|
min(wait_us, 2000), wait_us);
|
|
|
|
}
|
|
|
|
|
2021-02-26 13:08:21 -08:00
|
|
|
#define MAX_AUX_RETRIES 5
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
/*
 * Pack an AUX request into the command FIFO and kick off the transfer.
 *
 * The FIFO is loaded with a 4-byte command header (address, direction,
 * length) followed by the payload for writes; reads load only the header.
 * Returns the number of payload bytes queued (0 for reads) or -EINVAL if
 * the message exceeds the FIFO capacity.  Completion is reported
 * asynchronously via the AUX interrupt.
 */
static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
			struct drm_dp_aux_msg *msg)
{
	u8 data[4];
	u32 reg;
	ssize_t len;
	u8 *msgdata = msg->buffer;
	int const AUX_CMD_FIFO_LEN = 128;
	int i = 0;

	/* reads carry no payload into the FIFO, only the 4-byte header */
	if (aux->read)
		len = 0;
	else
		len = msg->size;

	/*
	 * cmd fifo only has depth of 144 bytes
	 * limit buf length to 128 bytes here
	 */
	if (len > AUX_CMD_FIFO_LEN - 4) {
		DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
		return -EINVAL;
	}

	/* Pack cmd and write to HW */
	data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
	if (aux->read)
		data[0] |= BIT(4); /* R/W */

	data[1] = msg->address >> 8; /* addr[15:8] */
	data[2] = msg->address; /* addr[7:0] */
	data[3] = msg->size - 1; /* len[7:0] */

	/* load header then payload into the FIFO, one byte per register write */
	for (i = 0; i < len + 4; i++) {
		reg = (i < 4) ? data[i] : msgdata[i - 4];
		reg <<= DP_AUX_DATA_OFFSET;
		reg &= DP_AUX_DATA_MASK;
		reg |= DP_AUX_DATA_WRITE;
		/* index = 0, write */
		if (i == 0)
			reg |= DP_AUX_DATA_INDEX_WRITE;
		msm_dp_write_aux(aux, REG_DP_AUX_DATA, reg);
	}

	/* clear any stale transaction state before arming a new one */
	msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, 0);
	msm_dp_aux_clear_hw_interrupts(aux);

	reg = 0; /* Transaction number == 1 */
	if (!aux->native) { /* i2c */
		reg |= DP_AUX_TRANS_CTRL_I2C;

		if (aux->no_send_addr)
			reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR;

		if (aux->no_send_stop)
			reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP;
	}

	/* GO starts the transaction; completion arrives via the AUX IRQ */
	reg |= DP_AUX_TRANS_CTRL_GO;
	msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, reg);

	return len;
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
static ssize_t msm_dp_aux_cmd_fifo_tx(struct msm_dp_aux_private *aux,
|
2020-08-27 14:16:55 -07:00
|
|
|
struct drm_dp_aux_msg *msg)
|
|
|
|
{
|
2021-05-07 14:25:05 -07:00
|
|
|
ssize_t ret;
|
|
|
|
unsigned long time_left;
|
2020-08-27 14:16:55 -07:00
|
|
|
|
|
|
|
reinit_completion(&aux->comp);
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
ret = msm_dp_aux_write(aux, msg);
|
2021-05-07 14:25:05 -07:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2021-05-07 14:25:05 -07:00
|
|
|
time_left = wait_for_completion_timeout(&aux->comp,
|
|
|
|
msecs_to_jiffies(250));
|
|
|
|
if (!time_left)
|
2020-08-27 14:16:55 -07:00
|
|
|
return -ETIMEDOUT;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
static ssize_t msm_dp_aux_cmd_fifo_rx(struct msm_dp_aux_private *aux,
|
2020-08-27 14:16:55 -07:00
|
|
|
struct drm_dp_aux_msg *msg)
|
|
|
|
{
|
|
|
|
u32 data;
|
|
|
|
u8 *dp;
|
|
|
|
u32 i, actual_i;
|
|
|
|
u32 len = msg->size;
|
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
data = msm_dp_read_aux(aux, REG_DP_AUX_TRANS_CTRL);
|
2025-05-18 14:21:38 +03:00
|
|
|
data &= ~DP_AUX_TRANS_CTRL_GO;
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_AUX_TRANS_CTRL, data);
|
2020-08-27 14:16:55 -07:00
|
|
|
|
|
|
|
data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
|
|
|
|
data |= DP_AUX_DATA_READ; /* read */
|
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_AUX_DATA, data);
|
2020-08-27 14:16:55 -07:00
|
|
|
|
|
|
|
dp = msg->buffer;
|
|
|
|
|
|
|
|
/* discard first byte */
|
2025-05-18 14:21:44 +03:00
|
|
|
data = msm_dp_read_aux(aux, REG_DP_AUX_DATA);
|
2020-08-27 14:16:55 -07:00
|
|
|
|
|
|
|
for (i = 0; i < len; i++) {
|
2025-05-18 14:21:44 +03:00
|
|
|
data = msm_dp_read_aux(aux, REG_DP_AUX_DATA);
|
2020-08-27 14:16:55 -07:00
|
|
|
*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
|
|
|
|
|
|
|
|
actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
|
|
|
|
if (i != actual_i)
|
2021-05-07 14:25:05 -07:00
|
|
|
break;
|
2020-08-27 14:16:55 -07:00
|
|
|
}
|
2021-05-07 14:25:05 -07:00
|
|
|
|
|
|
|
return i;
|
2020-08-27 14:16:55 -07:00
|
|
|
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
static void msm_dp_aux_update_offset_and_segment(struct msm_dp_aux_private *aux,
|
2020-08-27 14:16:55 -07:00
|
|
|
struct drm_dp_aux_msg *input_msg)
|
|
|
|
{
|
|
|
|
u32 edid_address = 0x50;
|
|
|
|
u32 segment_address = 0x30;
|
|
|
|
bool i2c_read = input_msg->request &
|
|
|
|
(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
|
|
|
|
u8 *data;
|
|
|
|
|
|
|
|
if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
|
|
|
|
(input_msg->address != segment_address)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
|
|
data = input_msg->buffer;
|
|
|
|
if (input_msg->address == segment_address)
|
|
|
|
aux->segment = *data;
|
|
|
|
else
|
|
|
|
aux->offset = *data;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2024-10-29 22:28:24 +02:00
|
|
|
* msm_dp_aux_transfer_helper() - helper function for EDID read transactions
|
2020-08-27 14:16:55 -07:00
|
|
|
*
|
|
|
|
* @aux: DP AUX private structure
|
|
|
|
* @input_msg: input message from DRM upstream APIs
|
|
|
|
* @send_seg: send the segment to sink
|
|
|
|
*
|
|
|
|
* return: void
|
|
|
|
*
|
|
|
|
* This helper function is used to fix EDID reads for non-compliant
|
|
|
|
* sinks that do not handle the i2c middle-of-transaction flag correctly.
|
|
|
|
*/
|
2024-10-29 22:28:24 +02:00
|
|
|
static void msm_dp_aux_transfer_helper(struct msm_dp_aux_private *aux,
			struct drm_dp_aux_msg *input_msg,
			bool send_seg)
{
	struct drm_dp_aux_msg helper_msg;
	u32 message_size = 0x10;	/* EDID is read 16 bytes at a time */
	u32 segment_address = 0x30;	/* E-DDC segment pointer i2c address */
	u32 const edid_block_length = 0x80;
	bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
	bool i2c_read = input_msg->request &
		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);

	/* only middle-of-transaction i2c reads (i.e. EDID reads) need help */
	if (!i2c_mot || !i2c_read || (input_msg->size == 0))
		return;

	/*
	 * Sending the segment value and EDID offset will be performed
	 * from the DRM upstream EDID driver for each block. Avoid
	 * duplicate AUX transactions related to this while reading the
	 * first 16 bytes of each block.
	 */
	if (!(aux->offset % edid_block_length) || !send_seg)
		goto end;

	/* the helper transactions below are address-only i2c writes */
	aux->read = false;
	aux->cmd_busy = true;
	aux->no_send_addr = true;
	aux->no_send_stop = true;

	/*
	 * Send the segment address for every i2c read in which the
	 * middle-of-tranaction flag is set. This is required to support EDID
	 * reads of more than 2 blocks as the segment address is reset to 0
	 * since we are overriding the middle-of-transaction flag for read
	 * transactions.
	 */

	if (aux->segment) {
		memset(&helper_msg, 0, sizeof(helper_msg));
		helper_msg.address = segment_address;
		helper_msg.buffer = &aux->segment;
		helper_msg.size = 1;
		msm_dp_aux_cmd_fifo_tx(aux, &helper_msg);
	}

	/*
	 * Send the offset address for every i2c read in which the
	 * middle-of-transaction flag is set. This will ensure that the sink
	 * will update its read pointer and return the correct portion of the
	 * EDID buffer in the subsequent i2c read trasntion triggered in the
	 * native AUX transfer function.
	 */
	memset(&helper_msg, 0, sizeof(helper_msg));
	helper_msg.address = input_msg->address;
	helper_msg.buffer = &aux->offset;
	helper_msg.size = 1;
	msm_dp_aux_cmd_fifo_tx(aux, &helper_msg);

end:
	/* advance the tracked offset; wrap the segment at block boundaries */
	aux->offset += message_size;
	if (aux->offset == 0x80 || aux->offset == 0x100)
		aux->segment = 0x0; /* reset segment at end of block */
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function does the real job to process an AUX transaction.
|
|
|
|
* It will call aux_reset() function to reset the AUX channel,
|
|
|
|
* if the waiting is timeout.
|
|
|
|
*/
|
2024-10-29 22:28:24 +02:00
|
|
|
static ssize_t msm_dp_aux_transfer(struct drm_dp_aux *msm_dp_aux,
			       struct drm_dp_aux_msg *msg)
{
	ssize_t ret;
	int const aux_cmd_native_max = 16;	/* max native AUX payload */
	int const aux_cmd_i2c_max = 128;	/* max i2c-over-AUX payload */
	struct msm_dp_aux_private *aux;

	aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);

	aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);

	/* Ignore address only message */
	if (msg->size == 0 || !msg->buffer) {
		msg->reply = aux->native ?
			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
		return msg->size;
	}

	/* msg sanity check */
	if ((aux->native && msg->size > aux_cmd_native_max) ||
		msg->size > aux_cmd_i2c_max) {
		DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
			__func__, msg->size, msg->request);
		return -EINVAL;
	}

	/* power up the controller for the duration of the transfer */
	ret = pm_runtime_resume_and_get(msm_dp_aux->dev);
	if (ret)
		return ret;

	mutex_lock(&aux->mutex);
	if (!aux->initted) {
		ret = -EIO;
		goto exit;
	}

	/*
	 * If we're using DP and an external display isn't connected then the
	 * transfer won't succeed. Return right away. If we don't do this we
	 * can end up with long timeouts if someone tries to access the DP AUX
	 * character device when no DP device is connected.
	 */
	if (!aux->is_edp && !aux->enable_xfers) {
		ret = -ENXIO;
		goto exit;
	}

	/* EDID quirk handling: replay segment/offset for broken sinks */
	msm_dp_aux_update_offset_and_segment(aux, msg);
	msm_dp_aux_transfer_helper(aux, msg, true);

	aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
	aux->cmd_busy = true;

	if (aux->read) {
		aux->no_send_addr = true;
		aux->no_send_stop = false;
	} else {
		aux->no_send_addr = true;
		aux->no_send_stop = true;
	}

	ret = msm_dp_aux_cmd_fifo_tx(aux, msg);
	if (ret < 0) {
		/* recalibrate the PHY after repeated native failures */
		if (aux->native) {
			aux->retry_cnt++;
			if (!(aux->retry_cnt % MAX_AUX_RETRIES))
				phy_calibrate(aux->phy);
		}
		/* reset aux if link is in connected state */
		if (msm_dp_aux_is_link_connected(msm_dp_aux))
			msm_dp_aux_reset(aux);
	} else {
		aux->retry_cnt = 0;
		/* translate the ISR-reported outcome into a DRM AUX reply */
		switch (aux->aux_error_num) {
		case DP_AUX_ERR_NONE:
			if (aux->read)
				ret = msm_dp_aux_cmd_fifo_rx(aux, msg);
			msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
			break;
		case DP_AUX_ERR_DEFER:
			msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
			break;
		case DP_AUX_ERR_PHY:
		case DP_AUX_ERR_ADDR:
		case DP_AUX_ERR_NACK:
		case DP_AUX_ERR_NACK_DEFER:
			msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_NACK : DP_AUX_I2C_REPLY_NACK;
			break;
		case DP_AUX_ERR_TOUT:
			ret = -ETIMEDOUT;
			break;
		}
	}

	aux->cmd_busy = false;

exit:
	mutex_unlock(&aux->mutex);
	pm_runtime_put_sync(msm_dp_aux->dev);

	return ret;
}
|
|
|
|
|
2025-05-18 14:21:43 +03:00
|
|
|
/*
 * AUX interrupt handler: decode the status bits into aux->aux_error_num
 * and wake the waiter in msm_dp_aux_cmd_fifo_tx().
 * Returns IRQ_NONE for spurious or unrecognized interrupts.
 */
irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux, u32 isr)
{
	struct msm_dp_aux_private *aux;

	if (!msm_dp_aux) {
		DRM_ERROR("invalid input\n");
		return IRQ_NONE;
	}

	aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);

	/* no transfer in flight: nothing for us to complete */
	if (!aux->cmd_busy) {
		DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
		return IRQ_NONE;
	}

	/*
	 * The logic below assumes only one error bit is set (other than "done"
	 * which can apparently be set at the same time as some of the other
	 * bits). Warn if more than one get set so we know we need to improve
	 * the logic.
	 */
	if (hweight32(isr & ~DP_INTR_AUX_XFER_DONE) > 1)
		DRM_WARN("Some DP AUX interrupts unhandled: %#010x\n", isr);

	/* priority-ordered decode: PHY error first, then protocol errors */
	if (isr & DP_INTR_AUX_ERROR) {
		aux->aux_error_num = DP_AUX_ERR_PHY;
		msm_dp_aux_clear_hw_interrupts(aux);
	} else if (isr & DP_INTR_NACK_DEFER) {
		aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
	} else if (isr & DP_INTR_WRONG_ADDR) {
		aux->aux_error_num = DP_AUX_ERR_ADDR;
	} else if (isr & DP_INTR_TIMEOUT) {
		aux->aux_error_num = DP_AUX_ERR_TOUT;
	} else if (!aux->native && (isr & DP_INTR_I2C_NACK)) {
		aux->aux_error_num = DP_AUX_ERR_NACK;
	} else if (!aux->native && (isr & DP_INTR_I2C_DEFER)) {
		/* defer alongside "done" is treated as a NACK */
		if (isr & DP_INTR_AUX_XFER_DONE)
			aux->aux_error_num = DP_AUX_ERR_NACK;
		else
			aux->aux_error_num = DP_AUX_ERR_DEFER;
	} else if (isr & DP_INTR_AUX_XFER_DONE) {
		aux->aux_error_num = DP_AUX_ERR_NONE;
	} else {
		DRM_WARN("Unexpected interrupt: %#010x\n", isr);
		return IRQ_NONE;
	}

	/* wake the thread blocked in msm_dp_aux_cmd_fifo_tx() */
	complete(&aux->comp);

	return IRQ_HANDLED;
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
/* Gate AUX transfers on external-display presence (see msm_dp_aux_transfer). */
void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled)
{
	struct msm_dp_aux_private *aux =
		container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);

	aux->enable_xfers = enabled;
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux)
|
2020-08-27 14:16:55 -07:00
|
|
|
{
|
2024-10-29 22:28:24 +02:00
|
|
|
struct msm_dp_aux_private *aux;
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2024-01-26 20:26:29 +02:00
|
|
|
phy_calibrate(aux->phy);
|
2025-05-18 14:21:38 +03:00
|
|
|
msm_dp_aux_reset(aux);
|
2020-08-27 14:16:55 -07:00
|
|
|
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux)
|
2020-08-27 14:16:55 -07:00
|
|
|
{
|
2024-10-29 22:28:24 +02:00
|
|
|
struct msm_dp_aux_private *aux;
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
if (!msm_dp_aux) {
|
2020-08-27 14:16:55 -07:00
|
|
|
DRM_ERROR("invalid input\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2021-11-09 10:04:18 -08:00
|
|
|
mutex_lock(&aux->mutex);
|
|
|
|
|
2025-05-18 14:21:38 +03:00
|
|
|
msm_dp_aux_enable(aux);
|
2020-08-27 14:16:55 -07:00
|
|
|
aux->retry_cnt = 0;
|
2021-11-09 10:04:18 -08:00
|
|
|
aux->initted = true;
|
|
|
|
|
|
|
|
mutex_unlock(&aux->mutex);
|
2020-08-27 14:16:55 -07:00
|
|
|
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux)
|
2020-08-27 14:16:55 -07:00
|
|
|
{
|
2024-10-29 22:28:24 +02:00
|
|
|
struct msm_dp_aux_private *aux;
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2021-11-09 10:04:18 -08:00
|
|
|
mutex_lock(&aux->mutex);
|
|
|
|
|
|
|
|
aux->initted = false;
|
2025-05-18 14:21:38 +03:00
|
|
|
msm_dp_aux_disable(aux);
|
2021-11-09 10:04:18 -08:00
|
|
|
|
|
|
|
mutex_unlock(&aux->mutex);
|
2020-08-27 14:16:55 -07:00
|
|
|
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux)
|
2020-08-27 14:16:55 -07:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
if (!msm_dp_aux) {
|
2020-08-27 14:16:55 -07:00
|
|
|
DRM_ERROR("invalid input\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
ret = drm_dp_aux_register(msm_dp_aux);
|
2020-08-27 14:16:55 -07:00
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
|
|
|
|
ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
/**
 * msm_dp_aux_unregister() - unregister the AUX channel from the DRM core
 * @msm_dp_aux: DRM AUX channel previously passed to msm_dp_aux_register()
 *
 * Thin passthrough to the DRM helper; no driver-private teardown is done
 * here (see msm_dp_aux_put() for that).
 */
void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux)
{
	drm_dp_aux_unregister(msm_dp_aux);
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
static int msm_dp_wait_hpd_asserted(struct drm_dp_aux *msm_dp_aux,
|
2023-12-01 15:19:49 -08:00
|
|
|
unsigned long wait_us)
|
|
|
|
{
|
|
|
|
int ret;
|
2024-10-29 22:28:24 +02:00
|
|
|
struct msm_dp_aux_private *aux;
|
2023-12-01 15:19:49 -08:00
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
|
2023-12-01 15:19:49 -08:00
|
|
|
|
2024-02-27 00:34:45 +02:00
|
|
|
ret = pm_runtime_resume_and_get(aux->dev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2025-05-18 14:21:38 +03:00
|
|
|
ret = msm_dp_aux_wait_for_hpd_connect_state(aux, wait_us);
|
2023-12-01 15:19:49 -08:00
|
|
|
pm_runtime_put_sync(aux->dev);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2025-05-18 14:21:42 +03:00
|
|
|
void msm_dp_aux_hpd_enable(struct drm_dp_aux *msm_dp_aux)
|
|
|
|
{
|
|
|
|
struct msm_dp_aux_private *aux =
|
|
|
|
container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
/* Configure REFTIMER and enable it */
|
2025-05-18 14:21:44 +03:00
|
|
|
reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_REFTIMER);
|
2025-05-18 14:21:42 +03:00
|
|
|
reg |= DP_DP_HPD_REFTIMER_ENABLE;
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_DP_HPD_REFTIMER, reg);
|
2025-05-18 14:21:42 +03:00
|
|
|
|
|
|
|
/* Enable HPD */
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
|
2025-05-18 14:21:42 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void msm_dp_aux_hpd_disable(struct drm_dp_aux *msm_dp_aux)
|
|
|
|
{
|
|
|
|
struct msm_dp_aux_private *aux =
|
|
|
|
container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
|
|
|
|
u32 reg;
|
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_REFTIMER);
|
2025-05-18 14:21:42 +03:00
|
|
|
reg &= ~DP_DP_HPD_REFTIMER_ENABLE;
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_DP_HPD_REFTIMER, reg);
|
2025-05-18 14:21:42 +03:00
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
msm_dp_write_aux(aux, REG_DP_DP_HPD_CTRL, 0);
|
2025-05-18 14:21:42 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * msm_dp_aux_hpd_intr_enable() - unmask all HPD interrupts
 * @msm_dp_aux: DRM AUX channel embedded in our private state
 */
void msm_dp_aux_hpd_intr_enable(struct drm_dp_aux *msm_dp_aux)
{
	struct msm_dp_aux_private *aux =
		container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
	u32 reg;

	/*
	 * Read-modify-write of the mask register. NOTE(review): after
	 * OR-ing in DP_DP_HPD_INT_MASK and then AND-ing the written value
	 * with the same mask, the value stored is always exactly
	 * DP_DP_HPD_INT_MASK, making the initial read look redundant —
	 * confirm whether the read has hardware side effects before
	 * simplifying.
	 */
	reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
	reg |= DP_DP_HPD_INT_MASK;
	msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_MASK,
			reg & DP_DP_HPD_INT_MASK);
}
|
|
|
|
|
|
|
|
/**
 * msm_dp_aux_hpd_intr_disable() - mask all HPD interrupts
 * @msm_dp_aux: DRM AUX channel embedded in our private state
 */
void msm_dp_aux_hpd_intr_disable(struct drm_dp_aux *msm_dp_aux)
{
	struct msm_dp_aux_private *aux =
		container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
	u32 reg;

	/*
	 * Read-modify-write of the mask register. NOTE(review): clearing
	 * DP_DP_HPD_INT_MASK and then AND-ing the written value with that
	 * same mask always stores 0, which also zeroes any bits outside
	 * the mask field — confirm that is intended before simplifying.
	 */
	reg = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);
	reg &= ~DP_DP_HPD_INT_MASK;
	msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_MASK,
			reg & DP_DP_HPD_INT_MASK);
}
|
|
|
|
|
|
|
|
/**
 * msm_dp_aux_get_hpd_intr_status() - read and acknowledge HPD interrupts
 * @msm_dp_aux: DRM AUX channel embedded in our private state
 *
 * Return: the raw interrupt status with masked-out interrupt bits
 * filtered away; the non-interrupt HPD state bits pass through untouched.
 */
u32 msm_dp_aux_get_hpd_intr_status(struct drm_dp_aux *msm_dp_aux)
{
	struct msm_dp_aux_private *aux =
		container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
	int isr, mask;

	/* Read status first, then ack only the interrupt-flag bits of it */
	isr = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_STATUS);
	msm_dp_write_aux(aux, REG_DP_DP_HPD_INT_ACK,
				 (isr & DP_DP_HPD_INT_MASK));
	mask = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_MASK);

	/*
	 * We only want to return interrupts that are unmasked to the caller.
	 * However, the interrupt status field also contains other
	 * informational bits about the HPD state status, so we only mask
	 * out the part of the register that tells us about which interrupts
	 * are pending.
	 */
	return isr & (mask | ~DP_DP_HPD_INT_MASK);
}
|
|
|
|
|
|
|
|
u32 msm_dp_aux_is_link_connected(struct drm_dp_aux *msm_dp_aux)
|
|
|
|
{
|
|
|
|
struct msm_dp_aux_private *aux =
|
|
|
|
container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
|
|
|
|
u32 status;
|
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
status = msm_dp_read_aux(aux, REG_DP_DP_HPD_INT_STATUS);
|
2025-05-18 14:21:42 +03:00
|
|
|
status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
|
|
|
|
status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2025-05-18 14:21:44 +03:00
|
|
|
struct drm_dp_aux *msm_dp_aux_get(struct device *dev,
|
2024-01-26 20:26:29 +02:00
|
|
|
struct phy *phy,
|
2025-05-18 14:21:44 +03:00
|
|
|
bool is_edp,
|
|
|
|
void __iomem *aux_base)
|
2020-08-27 14:16:55 -07:00
|
|
|
{
|
2024-10-29 22:28:24 +02:00
|
|
|
struct msm_dp_aux_private *aux;
|
2020-08-27 14:16:55 -07:00
|
|
|
|
|
|
|
aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
|
|
|
|
if (!aux)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
init_completion(&aux->comp);
|
|
|
|
aux->cmd_busy = false;
|
2022-04-25 17:14:31 +05:30
|
|
|
aux->is_edp = is_edp;
|
2020-08-27 14:16:55 -07:00
|
|
|
mutex_init(&aux->mutex);
|
|
|
|
|
|
|
|
aux->dev = dev;
|
2024-01-26 20:26:29 +02:00
|
|
|
aux->phy = phy;
|
2020-08-27 14:16:55 -07:00
|
|
|
aux->retry_cnt = 0;
|
2025-05-18 14:21:44 +03:00
|
|
|
aux->aux_base = aux_base;
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2023-12-01 15:19:49 -08:00
|
|
|
/*
|
|
|
|
* Use the drm_dp_aux_init() to use the aux adapter
|
|
|
|
* before registering AUX with the DRM device so that
|
|
|
|
* msm eDP panel can be detected by generic_dep_panel_probe().
|
|
|
|
*/
|
2024-10-29 22:28:24 +02:00
|
|
|
aux->msm_dp_aux.name = "dpu_dp_aux";
|
|
|
|
aux->msm_dp_aux.dev = dev;
|
|
|
|
aux->msm_dp_aux.transfer = msm_dp_aux_transfer;
|
|
|
|
aux->msm_dp_aux.wait_hpd_asserted = msm_dp_wait_hpd_asserted;
|
|
|
|
drm_dp_aux_init(&aux->msm_dp_aux);
|
2023-12-01 15:19:49 -08:00
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
return &aux->msm_dp_aux;
|
2020-08-27 14:16:55 -07:00
|
|
|
}
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
void msm_dp_aux_put(struct drm_dp_aux *msm_dp_aux)
|
2020-08-27 14:16:55 -07:00
|
|
|
{
|
2024-10-29 22:28:24 +02:00
|
|
|
struct msm_dp_aux_private *aux;
|
2020-08-27 14:16:55 -07:00
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
if (!msm_dp_aux)
|
2020-08-27 14:16:55 -07:00
|
|
|
return;
|
|
|
|
|
2024-10-29 22:28:24 +02:00
|
|
|
aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
|
2020-08-27 14:16:55 -07:00
|
|
|
|
|
|
|
mutex_destroy(&aux->mutex);
|
|
|
|
|
|
|
|
devm_kfree(aux->dev, aux);
|
|
|
|
}
|