// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include <drm/drm_print.h>

#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_uncore_trace.h"

#define AUX_CH_NAME_BUFSIZE	6

static const char *aux_ch_name(struct intel_display *display,
			       char *buf, int size, enum aux_ch aux_ch)
{
	if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD)
		snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
	else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1)
		snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
	else
		snprintf(buf, size, "%c", 'A' + aux_ch);

	return buf;
}

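/*
 * Pack up to 4 bytes from @src MSB-first into a single u32, matching the
 * byte layout of the AUX channel data registers.
 */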
u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

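/*
 * Wait for the DP_AUX_CH_CTL_SEND_BUSY bit to clear, i.e. for the previously
 * started AUX transaction to complete, with a 10 ms timeout. Returns the
 * final AUX_CH_CTL register value.
 */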
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
				   0,
				   2, timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = display->cdclk.hw.cdclk;
	else
		freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(display)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static int intel_dp_aux_sync_len(void)
{
	int precharge = 16; /* 10-16 */
	int preamble = 16;

	return precharge + preamble;
}

int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp)
{
	int precharge = 10; /* 10-16 */
	int preamble = 8;

	/*
	 * We faced some glitches on Dell Precision 5490 MTL laptop with panel:
	 * "Manufacturer: AUO, Model: 63898" when using HW default 18. Using 20
	 * fixes these problems with the panel. It is still within the range
	 * mentioned in the eDP specification. Increasing the Fast Wake sync
	 * length causes problems with other panels, so only increase the
	 * length as a quirk for this specific laptop.
	 */
	if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
		precharge += 2;

	return precharge + preamble;
}

static int g4x_dp_aux_precharge_len(void)
{
	int precharge_min = 10;
	int preamble = 16;

	/* HW wants the length of the extra precharge in 2us units */
	return (intel_dp_aux_sync_len() -
		precharge_min - preamble) / 2;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 timeout;

	/* Max timeout value on G4x-BDW: 1.6ms */
	if (display->platform.broadwell)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	       DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) |
	       DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	/*
	 * Max timeout values:
	 * SKL-GLK: 1.6ms
	 * ICL+: 4ms
	 */
	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		ret |= DP_AUX_CH_CTL_TBT_IO;

	/*
	 * Power request bit is already set during aux power well enable.
	 * Preserve the bit across aux transactions.
	 */
	if (DISPLAY_VER(display) >= 14)
		ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;

	return ret;
}

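/*
 * Perform one raw AUX transfer: load up to 20 bytes of @send into the data
 * registers, kick off the transaction (retrying with each available clock
 * divider and up to 5 times per divider), and copy any reply into @recv.
 * Returns the number of bytes received or a negative error code.
 */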
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref = NULL;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	intel_digital_port_lock(encoder);
	/*
	 * Abort transfers on a disconnected port as required by
	 * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
	 * timeouts that would otherwise happen.
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !intel_digital_port_connected_locked(&dig_port->base)) {
		ret = -ENXIO;
		goto out_unlock;
	}

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(display, aux_domain);

	/*
	 * The PPS state needs to be locked for:
	 * - eDP on all platforms, since AUX transfers on eDP need VDD power
	 *   (either forced or via panel power) which depends on the PPS
	 *   state.
	 * - non-eDP on platforms where the PPS is a pipe instance (VLV/CHV),
	 *   since changing the PPS state (via a parallel modeset for
	 *   instance) may interfere with the AUX transfers on a non-eDP
	 *   output as well.
	 */
	if (intel_dp_is_edp(intel_dp) ||
	    display->platform.valleyview || display->platform.cherryview)
		pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/*
	 * FIXME PSR should be disabled here to prevent
	 * it using the same AUX CH simultaneously
	 */

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_de_read_notrace(display, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_de_read(display, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(display->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_de_write(display, ch_data[i >> 2],
					       intel_dp_aux_pack(send + i,
								 send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_de_write(display, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_de_write(display, ch_ctl,
				       status | DP_AUX_CH_CTL_DONE |
				       DP_AUX_CH_CTL_TIME_OUT_ERROR |
				       DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(display->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(display->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * the drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(display->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	if (pps_wakeref)
		intel_pps_unlock(intel_dp, pps_wakeref);

	intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
	intel_digital_port_unlock(encoder);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

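/*
 * Build the 4 byte AUX request header: the request type in the high nibble
 * of byte 0 together with address bits 19:16, the remaining address bits in
 * bytes 1-2, and the transfer length minus one in byte 3.
 */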
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

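/*
 * drm_dp_aux .transfer() hook: translate a drm_dp_aux_msg into raw AUX
 * transfers, returning the payload size on success or a negative error code.
 */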
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct intel_display *display = to_intel_display(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(display->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(display->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(display->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_CTL(display, aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A);
	}
}

static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index);
	}
}

void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	if (DISPLAY_VER(display) >= 14) {
		intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(display)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else if (display->platform.valleyview || display->platform.cherryview) {
		intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (display->platform.broadwell || display->platform.haswell)
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(display))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	intel_dp->aux.drm_dev = display->drm;
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
				       aux_ch_name(display, buf, sizeof(buf), aux_ch),
				       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	intel_dp_dpcd_set_probe(intel_dp, true);
}

static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	/* SKL has DDI E but no AUX E */
	if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E)
		return AUX_CH_A;

	return (enum aux_ch)encoder->port;
}

static struct intel_encoder *
get_encoder_by_aux_ch(struct intel_encoder *encoder,
		      enum aux_ch aux_ch)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *other;

	for_each_intel_encoder(display->drm, other) {
		if (other == encoder)
			continue;

		if (!intel_encoder_is_dig_port(other))
			continue;

		if (enc_to_dig_port(other)->aux_ch == aux_ch)
			return other;
	}

	return NULL;
}

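/*
 * Pick the AUX channel for @encoder: use the VBT provided channel if any,
 * otherwise fall back to the platform default, and refuse a channel that is
 * already claimed by another encoder.
 */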
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *other;
	const char *source;
	enum aux_ch aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
	source = "VBT";

	if (aux_ch == AUX_CH_NONE) {
		aux_ch = default_aux_ch(encoder);
		source = "platform default";
	}

	if (aux_ch == AUX_CH_NONE)
		return AUX_CH_NONE;

	/* FIXME validate aux_ch against platform caps */

	other = get_encoder_by_aux_ch(encoder, aux_ch);
	if (other) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name,
			    aux_ch_name(display, buf, sizeof(buf), aux_ch),
			    other->base.base.id, other->base.name);
		return AUX_CH_NONE;
	}

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
		    encoder->base.base.id, encoder->base.name,
		    aux_ch_name(display, buf, sizeof(buf), aux_ch), source);

	return aux_ch;
}

void intel_dp_aux_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->gmbus.wait_queue);
}