Merge drm/drm-next into drm-misc-next-fixes

Backmerging to get a few more commits that came from drm-misc-next.
See [1].

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://lore.kernel.org/dri-devel/20240229084806.GA21616@localhost.localdomain/ # 1
commit 031541c260
Author: Thomas Zimmermann
Date:   2024-03-07 13:30:43 +01:00

199 changed files with 16350 additions and 5275 deletions


@ -24,37 +24,4 @@ restrictions later on.
As a remedy for such situations, the kernel configuration item
CONFIG_DRM_LOAD_EDID_FIRMWARE was introduced. It allows providing an
individually prepared or corrected EDID data set in the /lib/firmware
directory from where it is loaded via the firmware interface. The code
(see drivers/gpu/drm/drm_edid_load.c) contains built-in data sets for
commonly used screen resolutions (800x600, 1024x768, 1280x1024, 1600x1200,
1680x1050, 1920x1080) as binary blobs, but the kernel source tree does
not contain code to create these data. In order to elucidate the origin
of the built-in binary EDID blobs and to facilitate the creation of
individual data for a specific misbehaving monitor, commented sources
and a Makefile environment are given here.
To create binary EDID and C source code files from the existing data
material, simply type "make" in tools/edid/.
If you want to create your own EDID file, copy the file 1024x768.S,
replace the settings with your own data and add a new target to the
Makefile. Please note that the EDID data structure expects the timing
values in a different form than the standard X11 format.
X11:
HTimings:
hdisp hsyncstart hsyncend htotal
VTimings:
vdisp vsyncstart vsyncend vtotal
EDID::
#define XPIX hdisp
#define XBLANK htotal-hdisp
#define XOFFSET hsyncstart-hdisp
#define XPULSE hsyncend-hsyncstart
#define YPIX vdisp
#define YBLANK vtotal-vdisp
#define YOFFSET vsyncstart-vdisp
#define YPULSE vsyncend-vsyncstart
directory from where it is loaded via the firmware interface.
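
To make the conversion removed above concrete: for a standard 1920x1080@60 mode
(X11 horizontal timings 1920 2008 2052 2200, vertical timings 1080 1084 1089 1125,
the common CEA-861 values, used here purely as a worked example), the EDID-style
defines come out as:

#define XPIX 1920	/* hdisp */
#define XBLANK 280	/* htotal - hdisp = 2200 - 1920 */
#define XOFFSET 88	/* hsyncstart - hdisp = 2008 - 1920 */
#define XPULSE 44	/* hsyncend - hsyncstart = 2052 - 2008 */
#define YPIX 1080	/* vdisp */
#define YBLANK 45	/* vtotal - vdisp = 1125 - 1080 */
#define YOFFSET 4	/* vsyncstart - vdisp = 1084 - 1080 */
#define YPULSE 5	/* vsyncend - vsyncstart = 1089 - 1084 */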


@ -1162,16 +1162,10 @@
panels may send no or incorrect EDID data sets.
This parameter allows specifying EDID data sets
in the /lib/firmware directory that are used instead.
Generic built-in EDID data sets are used, if one of
edid/1024x768.bin, edid/1280x1024.bin,
edid/1680x1050.bin, or edid/1920x1080.bin is given
and no file with the same name exists. Details and
instructions how to build your own EDID data are
available in Documentation/admin-guide/edid.rst. An EDID
data set will only be used for a particular connector,
if its name and a colon are prepended to the EDID
name. Each connector may use a unique EDID data
set by separating the files with a comma. An EDID
An EDID data set will only be used for a particular
connector, if its name and a colon are prepended to
the EDID name. Each connector may use a unique EDID
data set by separating the files with a comma. An EDID
data set with no connector name will be used for
any connectors not explicitly specified.
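
As a worked example of that syntax (the connector and file names here are
placeholders, not taken from this patch), a kernel command line could carry:

	drm.edid_firmware=eDP-1:edid/panel.bin,HDMI-A-1:edid/tv.bin,edid/fallback.bin

eDP-1 and HDMI-A-1 each get their named blob, while the unprefixed entry covers
any connector not explicitly listed.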


@ -19,6 +19,7 @@ properties:
- qcom,msm8916-dsi-ctrl
- qcom,msm8953-dsi-ctrl
- qcom,msm8974-dsi-ctrl
- qcom,msm8976-dsi-ctrl
- qcom,msm8996-dsi-ctrl
- qcom,msm8998-dsi-ctrl
- qcom,qcm2290-dsi-ctrl
@ -248,6 +249,7 @@ allOf:
contains:
enum:
- qcom,msm8953-dsi-ctrl
- qcom,msm8976-dsi-ctrl
then:
properties:
clocks:


@ -224,6 +224,7 @@ allOf:
enum:
- qcom,adreno-gmu-730.1
- qcom,adreno-gmu-740.1
- qcom,adreno-gmu-750.1
then:
properties:
reg:


@ -23,7 +23,7 @@ properties:
The driver is parsing the compat string for Adreno to
figure out the gpu-id and patch level.
items:
- pattern: '^qcom,adreno-[3-7][0-9][0-9]\.[0-9]$'
- pattern: '^qcom,adreno-[3-7][0-9][0-9]\.[0-9]+$'
- const: qcom,adreno
- description: |
The driver is parsing the compat string for Imageon to
@ -127,7 +127,7 @@ allOf:
properties:
compatible:
contains:
pattern: '^qcom,adreno-[3-5][0-9][0-9]\.[0-9]$'
pattern: '^qcom,adreno-[3-5][0-9][0-9]\.[0-9]+$'
then:
properties:
@ -203,7 +203,7 @@ allOf:
properties:
compatible:
contains:
pattern: '^qcom,adreno-[67][0-9][0-9]\.[0-9]$'
pattern: '^qcom,adreno-[67][0-9][0-9]\.[0-9]+$'
then: # Starting with A6xx, the clocks are usually defined in the GMU node
properties:


@ -127,6 +127,7 @@ patternProperties:
- qcom,dsi-phy-20nm
- qcom,dsi-phy-28nm-8226
- qcom,dsi-phy-28nm-hpm
- qcom,dsi-phy-28nm-hpm-fam-b
- qcom,dsi-phy-28nm-lp
- qcom,hdmi-phy-8084
- qcom,hdmi-phy-8660


@ -13,7 +13,9 @@ $ref: /schemas/display/msm/dpu-common.yaml#
properties:
compatible:
const: qcom,sm8650-dpu
enum:
- qcom,sm8650-dpu
- qcom,x1e80100-dpu
reg:
items:


@ -37,18 +37,21 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8650-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8650-dp
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -57,6 +60,7 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8650-dsi-phy-4nm


@ -0,0 +1,251 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/qcom,x1e80100-mdss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm X1E80100 Display MDSS
maintainers:
- Abel Vesa <abel.vesa@linaro.org>
description:
X1E80100 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like
DPU display controller, DP interfaces, etc.
$ref: /schemas/display/msm/mdss-common.yaml#
properties:
compatible:
const: qcom,x1e80100-mdss
clocks:
items:
- description: Display AHB
- description: Display hf AXI
- description: Display core
iommus:
maxItems: 1
interconnects:
maxItems: 3
interconnect-names:
maxItems: 3
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,x1e80100-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,x1e80100-dp
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,x1e80100-dp-phy
required:
- compatible
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interconnect/qcom,x1e80100-rpmh.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
display-subsystem@ae00000 {
compatible = "qcom,x1e80100-mdss";
reg = <0x0ae00000 0x1000>;
reg-names = "mdss";
interconnects = <&mmss_noc MASTER_MDP 0 &gem_noc SLAVE_LLCC 0>,
<&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>,
<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_DISPLAY_CFG 0>;
interconnect-names = "mdp0-mem", "mdp1-mem", "cpu-cfg";
resets = <&dispcc_core_bcr>;
power-domains = <&dispcc_gdsc>;
clocks = <&dispcc_ahb_clk>,
<&gcc_disp_hf_axi_clk>,
<&dispcc_mdp_clk>;
clock-names = "bus", "nrt_bus", "core";
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <1>;
iommus = <&apps_smmu 0x1c00 0x2>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
display-controller@ae01000 {
compatible = "qcom,x1e80100-dpu";
reg = <0x0ae01000 0x8f000>,
<0x0aeb0000 0x2008>;
reg-names = "mdp", "vbif";
clocks = <&gcc_axi_clk>,
<&dispcc_ahb_clk>,
<&dispcc_mdp_lut_clk>,
<&dispcc_mdp_clk>,
<&dispcc_mdp_vsync_clk>;
clock-names = "nrt_bus",
"iface",
"lut",
"core",
"vsync";
assigned-clocks = <&dispcc_mdp_vsync_clk>;
assigned-clock-rates = <19200000>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
interrupt-parent = <&mdss>;
interrupts = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dpu_intf1_out: endpoint {
remote-endpoint = <&dsi0_in>;
};
};
port@1 {
reg = <1>;
dpu_intf2_out: endpoint {
remote-endpoint = <&dsi1_in>;
};
};
};
mdp_opp_table: opp-table {
compatible = "operating-points-v2";
opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-325000000 {
opp-hz = /bits/ 64 <325000000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-375000000 {
opp-hz = /bits/ 64 <375000000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
opp-514000000 {
opp-hz = /bits/ 64 <514000000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
displayport-controller@ae90000 {
compatible = "qcom,x1e80100-dp";
reg = <0 0xae90000 0 0x200>,
<0 0xae90200 0 0x200>,
<0 0xae90400 0 0x600>,
<0 0xae91000 0 0x400>,
<0 0xae91400 0 0x400>;
interrupt-parent = <&mdss>;
interrupts = <12>;
clocks = <&dispcc_mdss_ahb_clk>,
<&dispcc_dptx0_aux_clk>,
<&dispcc_dptx0_link_clk>,
<&dispcc_dptx0_link_intf_clk>,
<&dispcc_dptx0_pixel0_clk>;
clock-names = "core_iface", "core_aux",
"ctrl_link",
"ctrl_link_iface",
"stream_pixel";
assigned-clocks = <&dispcc_mdss_dptx0_link_clk_src>,
<&dispcc_mdss_dptx0_pixel0_clk_src>;
assigned-clock-parents = <&usb_1_ss0_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_ss0_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
operating-points-v2 = <&mdss_dp0_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
phys = <&usb_1_ss0_qmpphy QMP_USB43DP_DP_PHY>;
phy-names = "dp";
#sound-dai-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mdss_dp0_in: endpoint {
remote-endpoint = <&mdss_intf0_out>;
};
};
port@1 {
reg = <1>;
mdss_dp0_out: endpoint {
};
};
};
mdss_dp0_opp_table: opp-table {
compatible = "operating-points-v2";
opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-270000000 {
opp-hz = /bits/ 64 <270000000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-540000000 {
opp-hz = /bits/ 64 <540000000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
opp-810000000 {
opp-hz = /bits/ 64 <810000000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
};
...


@ -93,6 +93,7 @@ properties:
- qcom,sm8350-smmu-500
- qcom,sm8450-smmu-500
- qcom,sm8550-smmu-500
- qcom,sm8650-smmu-500
- const: qcom,adreno-smmu
- const: qcom,smmu-500
- const: arm,mmu-500
@ -484,7 +485,12 @@ allOf:
- if:
properties:
compatible:
const: qcom,sm8450-smmu-500
items:
- const: qcom,sm8450-smmu-500
- const: qcom,adreno-smmu
- const: qcom,smmu-500
- const: arm,mmu-500
then:
properties:
clock-names:
@ -508,7 +514,13 @@ allOf:
- if:
properties:
compatible:
const: qcom,sm8550-smmu-500
items:
- enum:
- qcom,sm8550-smmu-500
- qcom,sm8650-smmu-500
- const: qcom,adreno-smmu
- const: qcom,smmu-500
- const: arm,mmu-500
then:
properties:
clock-names:
@ -544,7 +556,6 @@ allOf:
- qcom,sdx65-smmu-500
- qcom,sm6350-smmu-500
- qcom,sm6375-smmu-500
- qcom,sm8650-smmu-500
- qcom,x1e80100-smmu-500
then:
properties:


@ -614,7 +614,7 @@ AGPGART DRIVER
M: David Airlie <airlied@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm
T: git https://gitlab.freedesktop.org/drm/kernel.git
F: drivers/char/agp/
F: include/linux/agp*
F: include/uapi/linux/agp*
@ -6996,7 +6996,7 @@ L: dri-devel@lists.freedesktop.org
S: Maintained
B: https://gitlab.freedesktop.org/drm
C: irc://irc.oftc.net/dri-devel
T: git git://anongit.freedesktop.org/drm/drm
T: git https://gitlab.freedesktop.org/drm/kernel.git
F: Documentation/devicetree/bindings/display/
F: Documentation/devicetree/bindings/gpu/
F: Documentation/gpu/


@ -20,7 +20,7 @@ static unsigned int mhi_timeout_ms = 2000; /* 2 sec default */
module_param(mhi_timeout_ms, uint, 0600);
MODULE_PARM_DESC(mhi_timeout_ms, "MHI controller timeout value");
static struct mhi_channel_config aic100_channels[] = {
static const struct mhi_channel_config aic100_channels[] = {
{
.name = "QAIC_LOOPBACK",
.num = 0,


@ -17,6 +17,27 @@ config DRM_DISPLAY_DP_HELPER
help
DRM display helpers for DisplayPort.
config DRM_DISPLAY_DP_TUNNEL
bool
select DRM_DISPLAY_DP_HELPER
help
Enable support for DisplayPort tunnels. This allows drivers to use
DP tunnel features like the Bandwidth Allocation mode to maximize the
BW utilization for display streams on Thunderbolt links.
config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
bool "Enable debugging the DP tunnel state"
depends on REF_TRACKER
depends on DRM_DISPLAY_DP_TUNNEL
depends on DEBUG_KERNEL
depends on EXPERT
help
Enables debugging the DP tunnel manager's state, including the
consistency of all managed tunnels' reference counting and the state of
streams contained in tunnels.
If in doubt, say "N".
config DRM_DISPLAY_HDCP_HELPER
bool
depends on DRM_DISPLAY_HELPER


@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
drm_dp_helper.o \
drm_dp_mst_topology.o \
drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
drm_dp_tunnel.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \


@ -532,6 +532,15 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
mutex_lock(&aux->hw_mutex);
/*
* If the device attached to the aux bus is powered down then there's
* no reason to attempt a transfer. Error out immediately.
*/
if (aux->powered_down) {
ret = -EBUSY;
goto unlock;
}
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
@ -599,6 +608,29 @@ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
}
EXPORT_SYMBOL(drm_dp_dpcd_probe);
/**
* drm_dp_dpcd_set_powered() - Set whether the DP device is powered
* @aux: DisplayPort AUX channel; for convenience it's OK to pass NULL here
* and the function will be a no-op.
* @powered: true if powered; false if not
*
* If the endpoint device on the DP AUX bus is known to be powered down
* then this function can be called to make future transfers fail immediately
* instead of needing to time out.
*
* If this function is never called then a device defaults to being powered.
*/
void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
{
if (!aux)
return;
mutex_lock(&aux->hw_mutex);
aux->powered_down = !powered;
mutex_unlock(&aux->hw_mutex);
}
EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
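
To sketch the intended call pattern (the surrounding driver is hypothetical and
not part of this change): a panel driver that cuts power to its sink can flag the
AUX channel first, so that pending transfers fail fast instead of timing out:

/* hypothetical panel driver; 'panel' embeds a struct drm_dp_aux */
static void panel_unprepare(struct my_panel *panel)
{
	/* fail AUX transfers immediately while the sink is off */
	drm_dp_dpcd_set_powered(&panel->aux, false);
	/* ... actually remove panel power here ... */
}

static void panel_prepare(struct my_panel *panel)
{
	/* ... restore panel power here ... */
	drm_dp_dpcd_set_powered(&panel->aux, true);
}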
/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
@ -1858,6 +1890,9 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
struct drm_dp_aux_msg msg;
int err = 0;
if (aux->powered_down)
return -EBUSY;
dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
memset(&msg, 0, sizeof(msg));
@ -2913,6 +2948,103 @@ void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc)
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
/**
* drm_dp_vsc_sdp_supported() - check if vsc sdp is supported
* @aux: DisplayPort AUX channel
* @dpcd: DisplayPort configuration data
*
* Returns true if vsc sdp is supported, else returns false
*/
bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
u8 rx_feature;
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
return false;
}
return (rx_feature & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED);
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_supported);
/**
* drm_dp_vsc_sdp_pack() - pack a given vsc sdp into generic dp_sdp
* @vsc: vsc sdp initialized according to its purpose as defined in
* table 2-118 - table 2-120 in DP 1.4a specification
* @sdp: valid handle to the generic dp_sdp which will be packed
*
* Returns length of sdp on success and error code on failure
*/
ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
struct dp_sdp *sdp)
{
size_t length = sizeof(struct dp_sdp);
memset(sdp, 0, sizeof(struct dp_sdp));
/*
* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
* VSC SDP Header Bytes
*/
sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
if (vsc->revision == 0x6) {
sdp->db[0] = 1;
sdp->db[3] = 1;
}
/*
* Revision 0x5 and revision 0x7 supports Pixel Encoding/Colorimetry
* Format as per DP 1.4a spec and DP 2.0 respectively.
*/
if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
goto out;
/* VSC SDP Payload for DB16 through DB18 */
/* Pixel Encoding and Colorimetry Formats */
sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
switch (vsc->bpc) {
case 6:
/* 6bpc: 0x0 */
break;
case 8:
sdp->db[17] = 0x1; /* DB17[3:0] */
break;
case 10:
sdp->db[17] = 0x2;
break;
case 12:
sdp->db[17] = 0x3;
break;
case 16:
sdp->db[17] = 0x4;
break;
default:
WARN(1, "Missing case %d\n", vsc->bpc);
return -EINVAL;
}
/* Dynamic Range and Component Bit Depth */
if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
sdp->db[17] |= 0x80; /* DB17[7] */
/* Content Type */
sdp->db[18] = vsc->content_type & 0x7;
out:
return length;
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_pack);
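
A minimal usage sketch for the packing helper above, building the common
revision 0x5 colorimetry SDP (the field values are illustrative; the enum names
come from drm_dp_helper.h):

struct drm_dp_vsc_sdp vsc = {
	.sdp_type = DP_SDP_VSC,		/* Secondary-data Packet Type */
	.revision = 0x5,		/* DP 1.4a pixel encoding/colorimetry format */
	.length = 0x13,			/* valid data bytes for revision 0x5 */
	.pixelformat = DP_PIXELFORMAT_RGB,
	.colorimetry = DP_COLORIMETRY_DEFAULT,
	.bpc = 8,			/* packs as DB17[3:0] = 0x1 */
	.dynamic_range = DP_DYNAMIC_RANGE_VESA,
	.content_type = DP_CONTENT_TYPE_GRAPHICS,
};
struct dp_sdp sdp;
ssize_t len = drm_dp_vsc_sdp_pack(&vsc, &sdp); /* sizeof(struct dp_sdp) on success */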
/**
* drm_dp_get_pcon_max_frl_bw() - maximum frl supported by PCON
* @dpcd: DisplayPort configuration data
@ -4055,3 +4187,33 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr)
return 800000;
}
EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);
/**
* drm_dp_max_dprx_data_rate - Get the max data bandwidth of a DPRX sink
* @max_link_rate: max DPRX link rate in 10kbps units
* @max_lanes: max DPRX lane count
*
* Given a link rate and lanes, get the data bandwidth.
*
* Data bandwidth is the actual payload rate, which depends on the data
* bandwidth efficiency and the link rate.
*
* Note that protocol layers above the DPRX link level considered here can
* further limit the maximum data rate. Such layers are the MST topology (with
* limits on the link between the source and first branch device as well as on
* the whole MST path until the DPRX link) and (Thunderbolt) DP tunnels -
* which in turn can encapsulate an MST link with its own limit - with each
* SST or MST encapsulated tunnel sharing the BW of a tunnel group.
*
* Returns the maximum data rate in kBps units.
*/
int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes)
{
int ch_coding_efficiency =
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate * 10 * max_lanes,
ch_coding_efficiency),
1000000 * 8);
}
EXPORT_SYMBOL(drm_dp_max_dprx_data_rate);
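
Plugging numbers into the new helper: an HBR3 sink (8.1 Gbps per lane, i.e.
max_link_rate = 810000 in 10 kbps units) with 4 lanes uses 8b/10b channel coding,
for which drm_dp_bw_channel_coding_efficiency() returns 800000 ppm, so:

/* 810000 * 10 * 4 = 32400000 kbps raw; * 0.8 efficiency / 8 bits per byte */
drm_dp_max_dprx_data_rate(810000, 4); /* = 3240000 kBps, i.e. ~3.24 GB/s */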

File diff suppressed because it is too large.


@ -107,18 +107,6 @@ int drm_crtc_force_disable(struct drm_crtc *crtc)
return drm_mode_set_config_internal(&set);
}
static unsigned int drm_num_crtcs(struct drm_device *dev)
{
unsigned int num = 0;
struct drm_crtc *tmp;
drm_for_each_crtc(tmp, dev) {
num++;
}
return num;
}
int drm_crtc_register_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
@ -278,8 +266,7 @@ static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *
if (name) {
crtc->name = kvasprintf(GFP_KERNEL, name, ap);
} else {
crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
drm_num_crtcs(dev));
crtc->name = kasprintf(GFP_KERNEL, "crtc-%d", config->num_crtc);
}
if (!crtc->name) {
drm_mode_object_unregister(dev, &crtc->base);


@ -20,162 +20,28 @@
static char edid_firmware[PATH_MAX];
module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
#define GENERIC_EDIDS 6
static const char * const generic_edid_name[GENERIC_EDIDS] = {
"edid/800x600.bin",
"edid/1024x768.bin",
"edid/1280x1024.bin",
"edid/1600x1200.bin",
"edid/1680x1050.bin",
"edid/1920x1080.bin",
};
static const u8 generic_edid[GENERIC_EDIDS][128] = {
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x1b, 0x14, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x01, 0x00, 0x00, 0x45, 0x40,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xa0, 0x0f,
0x20, 0x00, 0x31, 0x58, 0x1c, 0x20, 0x28, 0x80,
0x14, 0x00, 0x15, 0xd0, 0x10, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x24, 0x26, 0x05, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
0x56, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xc2,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x64, 0x19,
0x00, 0x40, 0x41, 0x00, 0x26, 0x30, 0x08, 0x90,
0x36, 0x00, 0x63, 0x0a, 0x11, 0x00, 0x00, 0x18,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x2f, 0x31, 0x07, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x58,
0x47, 0x41, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x55,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x2c, 0x23, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0x81, 0x80,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2a,
0x00, 0x98, 0x51, 0x00, 0x2a, 0x40, 0x30, 0x70,
0x13, 0x00, 0xbc, 0x63, 0x11, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x3e, 0x40, 0x0b, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xa0,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x21, 0x39,
0x90, 0x30, 0x62, 0x1a, 0x27, 0x40, 0x68, 0xb0,
0x36, 0x00, 0xb5, 0x11, 0x11, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x40, 0x42, 0x0f, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x57,
0x53, 0x58, 0x47, 0x41, 0x0a, 0x20, 0x00, 0x26,
},
{
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x16, 0x01, 0x03, 0x6d, 0x32, 0x1c, 0x78,
0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xd1, 0xc0,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
0x45, 0x00, 0xf4, 0x19, 0x11, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
0x3d, 0x42, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x46,
0x48, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x05,
},
};
MODULE_PARM_DESC(edid_firmware,
"Do not probe monitor, use specified EDID blob from /lib/firmware instead.");
static const struct drm_edid *edid_load(struct drm_connector *connector, const char *name)
{
const struct firmware *fw = NULL;
const u8 *fwdata;
const struct drm_edid *drm_edid;
int fwsize, builtin;
int err;
builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
if (builtin >= 0) {
fwdata = generic_edid[builtin];
fwsize = sizeof(generic_edid[builtin]);
} else {
int err;
err = request_firmware(&fw, name, connector->dev->dev);
if (err) {
drm_err(connector->dev,
"[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n",
connector->base.id, connector->name,
name, err);
return ERR_PTR(err);
}
fwdata = fw->data;
fwsize = fw->size;
err = request_firmware(&fw, name, connector->dev->dev);
if (err) {
drm_err(connector->dev,
"[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n",
connector->base.id, connector->name,
name, err);
return ERR_PTR(err);
}
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Loaded %s firmware EDID \"%s\"\n",
connector->base.id, connector->name,
builtin >= 0 ? "built-in" : "external", name);
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Loaded external firmware EDID \"%s\"\n",
connector->base.id, connector->name, name);
drm_edid = drm_edid_alloc(fwdata, fwsize);
drm_edid = drm_edid_alloc(fw->data, fw->size);
if (!drm_edid_valid(drm_edid)) {
drm_err(connector->dev, "Invalid firmware EDID \"%s\"\n", name);
drm_edid_free(drm_edid);


@ -193,13 +193,22 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
if (!dev)
return 0;
/*
* Don't disable polling if it was never initialized
*/
if (dev->mode_config.poll_enabled)
drm_kms_helper_poll_disable(dev);
drm_kms_helper_poll_disable(dev);
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
state = drm_atomic_helper_suspend(dev);
if (IS_ERR(state)) {
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
drm_kms_helper_poll_enable(dev);
/*
* Don't enable polling if it was never initialized
*/
if (dev->mode_config.poll_enabled)
drm_kms_helper_poll_enable(dev);
return PTR_ERR(state);
}
@ -239,7 +248,11 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
dev->mode_config.suspend_state = NULL;
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
drm_kms_helper_poll_enable(dev);
/*
* Don't enable polling if it is not initialized
*/
if (dev->mode_config.poll_enabled)
drm_kms_helper_poll_enable(dev);
return ret;
}
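
For context, these helpers are typically wired directly into a driver's PM
callbacks; a minimal sketch (the driver names are hypothetical):

static int my_drm_pm_suspend(struct device *dev)
{
	return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
}

static int my_drm_pm_resume(struct device *dev)
{
	return drm_mode_config_helper_resume(dev_get_drvdata(dev));
}

With the poll_enabled checks added above, such a driver no longer needs to have
called drm_kms_helper_poll_init() for suspend/resume to work.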


@ -293,14 +293,17 @@ static void reschedule_output_poll_work(struct drm_device *dev)
* Drivers can call this helper from their device resume implementation. It is
* not an error to call this even when output polling isn't enabled.
*
* If device polling was never initialized before, this call will trigger a
* warning and return.
*
* Note that calls to enable and disable polling must be strictly ordered, which
* is automatically the case when they're only call from suspend/resume
* callbacks.
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
dev->mode_config.poll_running)
if (drm_WARN_ON_ONCE(dev, !dev->mode_config.poll_enabled) ||
!drm_kms_helper_poll || dev->mode_config.poll_running)
return;
if (drm_kms_helper_enable_hpd(dev) ||
@ -619,8 +622,12 @@ retry:
0);
}
/* Re-enable polling in case the global poll config changed. */
drm_kms_helper_poll_enable(dev);
/*
* Re-enable polling in case the global poll config changed but polling
* is still initialized.
*/
if (dev->mode_config.poll_enabled)
drm_kms_helper_poll_enable(dev);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@ -871,12 +878,18 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
* not an error to call this even when output polling isn't enabled or already
* disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
*
* If however, the polling was never initialized, this call will trigger a
* warning and return
*
* Note that calls to enable and disable polling must be strictly ordered, which
* is automatically the case when they're only call from suspend/resume
* callbacks.
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (drm_WARN_ON(dev, !dev->mode_config.poll_enabled))
return;
if (dev->mode_config.poll_running)
drm_kms_helper_disable_hpd(dev);


@ -155,6 +155,20 @@ config DRM_I915_PXP
protected session and manage the status of the alive software session,
as well as its life cycle.
config DRM_I915_DP_TUNNEL
bool "Enable DP tunnel support"
depends on DRM_I915
depends on USB4
select DRM_DISPLAY_DP_TUNNEL
default y
help
Choose this option to detect DP tunnels and enable the Bandwidth
Allocation mode for such tunnels. This allows using the maximum
resolution allowed by the link BW on all displays sharing the
link BW, for instance on a Thunderbolt link.
If in doubt, say "Y".
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT


@ -28,6 +28,7 @@ config DRM_I915_DEBUG
select STACKDEPOT
select STACKTRACE
select DRM_DP_AUX_CHARDEV
select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y


@ -369,6 +369,9 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
i915-$(CONFIG_DRM_I915_DP_TUNNEL) += \
display/intel_dp_tunnel.o
i915-y += \
i915_perf.o


@ -205,7 +205,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
const char *str;
u8 val;
priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return false;


@ -216,7 +216,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
u8 vendor, device;
char *name, *devid;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
ch7xxx = kzalloc(sizeof(*ch7xxx), GFP_KERNEL);
if (ch7xxx == NULL)
return false;


@ -267,7 +267,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
u16 temp;
int i;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return false;


@ -476,7 +476,7 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
struct ns2501_priv *ns;
unsigned char ch;
ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
if (ns == NULL)
return false;
@ -551,7 +551,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
const struct drm_display_mode *adjusted_mode)
{
const struct ns2501_configuration *conf;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
struct ns2501_priv *ns = dvo->dev_priv;
int mode_idx, i;
DRM_DEBUG_KMS
@ -655,7 +655,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
/* set the NS2501 power state */
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
struct ns2501_priv *ns = dvo->dev_priv;
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);


@ -141,7 +141,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
struct sil164_priv *sil;
unsigned char ch;
sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
sil = kzalloc(sizeof(*sil), GFP_KERNEL);
if (sil == NULL)
return false;


@ -173,7 +173,7 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
struct tfp410_priv *tfp;
int id;
tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
tfp = kzalloc(sizeof(*tfp), GFP_KERNEL);
if (tfp == NULL)
return false;


@ -70,26 +70,25 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
bool is_ddr3,
int fsb,
int mem)
static const struct cxsr_latency *intel_get_cxsr_latency(struct drm_i915_private *i915)
{
const struct cxsr_latency *latency;
int i;
if (fsb == 0 || mem == 0)
if (i915->fsb_freq == 0 || i915->mem_freq == 0)
return NULL;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
const struct cxsr_latency *latency = &cxsr_latency_table[i];
bool is_desktop = !IS_MOBILE(i915);
if (is_desktop == latency->is_desktop &&
is_ddr3 == latency->is_ddr3 &&
fsb == latency->fsb_freq && mem == latency->mem_freq)
i915->is_ddr3 == latency->is_ddr3 &&
i915->fsb_freq == latency->fsb_freq &&
i915->mem_freq == latency->mem_freq)
return latency;
}
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
drm_dbg_kms(&i915->drm, "Unknown FSB/MEM found, disable CxSR\n");
return NULL;
}
@ -525,6 +524,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
/**
* intel_calculate_wm - calculate watermark level
* @i915: the device
* @pixel_rate: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the FIFO buffer
@ -542,7 +542,8 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
static unsigned int intel_calculate_wm(int pixel_rate,
static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
int pixel_rate,
const struct intel_watermark_params *wm,
int fifo_size, int cpp,
unsigned int latency_ns)
@ -559,10 +560,10 @@ static unsigned int intel_calculate_wm(int pixel_rate,
latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
wm->guard_size;
DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
wm_size = fifo_size - entries;
DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > wm->max_wm)
@ -634,10 +635,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
u32 reg;
unsigned int wm;
latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq);
latency = intel_get_cxsr_latency(dev_priv);
if (!latency) {
drm_dbg_kms(&dev_priv->drm,
"Unknown FSB/MEM found, disable CxSR\n");
@ -653,7 +651,8 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
int cpp = fb->format->cpp[0];
/* Display SR */
wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
wm = intel_calculate_wm(dev_priv, pixel_rate,
&pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
@ -663,20 +662,23 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
wm = intel_calculate_wm(dev_priv, pixel_rate,
&pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
FW_WM(wm, CURSOR_SR));
/* Display HPLL off SR */
wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
wm = intel_calculate_wm(dev_priv, pixel_rate,
&pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
/* cursor HPLL off SR */
wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
wm = intel_calculate_wm(dev_priv, pixel_rate,
&pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
@ -2124,7 +2126,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
else
cpp = fb->format->cpp[0];
planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@ -2151,7 +2153,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
else
cpp = fb->format->cpp[0];
planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@ -2245,7 +2247,7 @@ static void i845_update_wm(struct drm_i915_private *dev_priv)
if (crtc == NULL)
return;
planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
&i845_wm_info,
i845_get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
@ -2531,7 +2533,8 @@ static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
static bool ilk_validate_wm_level(struct drm_i915_private *i915,
int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
{
@ -2554,14 +2557,17 @@ static bool ilk_validate_wm_level(int level,
*/
if (level == 0 && !result->enable) {
if (result->pri_val > max->pri)
DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
level, result->pri_val, max->pri);
drm_dbg_kms(&i915->drm,
"Primary WM%d too large %u (max %u)\n",
level, result->pri_val, max->pri);
if (result->spr_val > max->spr)
DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
level, result->spr_val, max->spr);
drm_dbg_kms(&i915->drm,
"Sprite WM%d too large %u (max %u)\n",
level, result->spr_val, max->spr);
if (result->cur_val > max->cur)
DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
drm_dbg_kms(&i915->drm,
"Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
result->pri_val = min_t(u32, result->pri_val, max->pri);
result->spr_val = min_t(u32, result->spr_val, max->spr);
@ -2761,7 +2767,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@ -2776,7 +2782,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
return false;
}
@ -2845,7 +2851,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
* register maximums since such watermarks are
* always invalid.
*/
if (!ilk_validate_wm_level(level, &max, wm)) {
if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
memset(wm, 0, sizeof(*wm));
break;
}
@ -2976,7 +2982,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
if (level > last_enabled_level)
wm->enable = false;
else if (!ilk_validate_wm_level(level, max, wm))
else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
/* make sure all following levels get disabled */
last_enabled_level = level - 1;
@ -4016,10 +4022,7 @@ void i9xx_wm_init(struct drm_i915_private *dev_priv)
g4x_setup_wm_latency(dev_priv);
dev_priv->display.funcs.wm = &g4x_wm_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
if (!intel_get_cxsr_latency(dev_priv)) {
drm_info(&dev_priv->drm,
"failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "


@ -29,6 +29,7 @@
* See intel_atomic_plane.c for the plane-specific atomic functionality.
*/
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
@ -38,6 +39,7 @@
#include "intel_atomic.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_dp_tunnel.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
@ -258,6 +260,10 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
if (crtc_state->post_csc_lut)
drm_property_blob_get(crtc_state->post_csc_lut);
if (crtc_state->dp_tunnel_ref.tunnel)
drm_dp_tunnel_ref_get(crtc_state->dp_tunnel_ref.tunnel,
&crtc_state->dp_tunnel_ref);
crtc_state->update_pipe = false;
crtc_state->update_m_n = false;
crtc_state->update_lrr = false;
@ -309,6 +315,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
intel_crtc_free_hw_state(crtc_state);
if (crtc_state->dp_tunnel_ref.tunnel)
drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
kfree(crtc_state);
}
@ -344,6 +352,8 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
/* state->internal not reset on purpose */
state->dpll_set = state->modeset = false;
intel_dp_tunnel_atomic_cleanup_inherited_state(state);
}
struct intel_crtc_state *


@ -1759,7 +1759,8 @@ parse_mipi_config(struct drm_i915_private *i915,
/* Find the sequence block and size for the given panel. */
static const u8 *
find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
find_panel_sequence_block(struct drm_i915_private *i915,
const struct bdb_mipi_sequence *sequence,
u16 panel_id, u32 *seq_size)
{
u32 total = get_blocksize(sequence);
@ -1776,7 +1777,7 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
if (index + header_size > total) {
DRM_ERROR("Invalid sequence block (header)\n");
drm_err(&i915->drm, "Invalid sequence block (header)\n");
return NULL;
}
@ -1789,7 +1790,7 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
index += header_size;
if (index + current_size > total) {
DRM_ERROR("Invalid sequence block\n");
drm_err(&i915->drm, "Invalid sequence block\n");
return NULL;
}
@ -1801,12 +1802,13 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
index += current_size;
}
DRM_ERROR("Sequence block detected but no valid configuration\n");
drm_err(&i915->drm, "Sequence block detected but no valid configuration\n");
return NULL;
}
static int goto_next_sequence(const u8 *data, int index, int total)
static int goto_next_sequence(struct drm_i915_private *i915,
const u8 *data, int index, int total)
{
u16 len;
@ -1836,7 +1838,7 @@ static int goto_next_sequence(const u8 *data, int index, int total)
len = *(data + index + 6) + 7;
break;
default:
DRM_ERROR("Unknown operation byte\n");
drm_err(&i915->drm, "Unknown operation byte\n");
return 0;
}
}
@ -1844,7 +1846,8 @@ static int goto_next_sequence(const u8 *data, int index, int total)
return 0;
}
static int goto_next_sequence_v3(const u8 *data, int index, int total)
static int goto_next_sequence_v3(struct drm_i915_private *i915,
const u8 *data, int index, int total)
{
int seq_end;
u16 len;
@ -1855,7 +1858,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
* checking on the structure.
*/
if (total < 5) {
DRM_ERROR("Too small sequence size\n");
drm_err(&i915->drm, "Too small sequence size\n");
return 0;
}
@ -1872,7 +1875,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
seq_end = index + size_of_sequence;
if (seq_end > total) {
DRM_ERROR("Invalid sequence size\n");
drm_err(&i915->drm, "Invalid sequence size\n");
return 0;
}
@ -1882,7 +1885,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
if (operation_byte == MIPI_SEQ_ELEM_END) {
if (index != seq_end) {
DRM_ERROR("Invalid element structure\n");
drm_err(&i915->drm, "Invalid element structure\n");
return 0;
}
return index;
@ -1904,8 +1907,8 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
case MIPI_SEQ_ELEM_PMIC:
break;
default:
DRM_ERROR("Unknown operation byte %u\n",
operation_byte);
drm_err(&i915->drm, "Unknown operation byte %u\n",
operation_byte);
break;
}
}
@ -2030,7 +2033,7 @@ parse_mipi_sequence(struct drm_i915_private *i915,
drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n",
sequence->version);
seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
seq_data = find_panel_sequence_block(i915, sequence, panel_type, &seq_size);
if (!seq_data)
return;
@ -2058,9 +2061,9 @@ parse_mipi_sequence(struct drm_i915_private *i915,
panel->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
index = goto_next_sequence_v3(data, index, seq_size);
index = goto_next_sequence_v3(i915, data, index, seq_size);
else
index = goto_next_sequence(data, index, seq_size);
index = goto_next_sequence(i915, data, index, seq_size);
if (!index) {
drm_err(&i915->drm, "Invalid sequence %u\n",
seq_id);
@ -2135,12 +2138,13 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
}
static u8 translate_iboost(u8 val)
static u8 translate_iboost(struct drm_i915_private *i915, u8 val)
{
static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
if (val >= ARRAY_SIZE(mapping)) {
DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
drm_dbg_kms(&i915->drm,
"Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
return 0;
}
return mapping[val];
@ -2897,12 +2901,14 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
/**
* intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
* @i915: the device
* @buf: pointer to a buffer to validate
* @size: size of the buffer
*
* Returns true on valid VBT.
*/
bool intel_bios_is_valid_vbt(const void *buf, size_t size)
bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
const void *buf, size_t size)
{
const struct vbt_header *vbt = buf;
const struct bdb_header *bdb;
@ -2911,17 +2917,17 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return false;
if (sizeof(struct vbt_header) > size) {
DRM_DEBUG_DRIVER("VBT header incomplete\n");
drm_dbg_kms(&i915->drm, "VBT header incomplete\n");
return false;
}
if (memcmp(vbt->signature, "$VBT", 4)) {
DRM_DEBUG_DRIVER("VBT invalid signature\n");
drm_dbg_kms(&i915->drm, "VBT invalid signature\n");
return false;
}
if (vbt->vbt_size > size) {
DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
drm_dbg_kms(&i915->drm, "VBT incomplete (vbt_size overflows)\n");
return false;
}
@ -2931,13 +2937,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
vbt->bdb_offset,
sizeof(struct bdb_header),
size)) {
DRM_DEBUG_DRIVER("BDB header incomplete\n");
drm_dbg_kms(&i915->drm, "BDB header incomplete\n");
return false;
}
bdb = get_bdb_header(vbt);
if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
DRM_DEBUG_DRIVER("BDB incomplete\n");
drm_dbg_kms(&i915->drm, "BDB incomplete\n");
return false;
}
@ -2989,7 +2995,7 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
for (count = 0; count < vbt_size; count += 4)
*(vbt + store++) = intel_spi_read(&i915->uncore, found + count);
if (!intel_bios_is_valid_vbt(vbt, vbt_size))
if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
goto err_free_vbt;
drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
@ -3046,7 +3052,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
memcpy_fromio(vbt, p, vbt_size);
if (!intel_bios_is_valid_vbt(vbt, vbt_size))
if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
goto err_free_vbt;
pci_unmap_rom(pdev, oprom);
@ -3398,6 +3404,7 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
struct dsc_compression_parameters_entry *dsc,
int dsc_max_bpc)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
int bpc = 8;
@ -3411,8 +3418,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
else if (dsc->support_8bpc && dsc_max_bpc >= 8)
bpc = 8;
else
DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DCS\n",
dsc_max_bpc);
drm_dbg_kms(&i915->drm, "VBT: Unsupported BPC %d for DCS\n",
dsc_max_bpc);
crtc_state->pipe_bpp = bpc * 3;
@ -3432,16 +3439,16 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
} else {
/* FIXME */
if (!(dsc->slices_per_line & BIT(0)))
DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n");
drm_dbg_kms(&i915->drm, "VBT: Unsupported DSC slice count for DSI\n");
crtc_state->dsc.slice_count = 1;
}
if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
crtc_state->dsc.slice_count != 0)
DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n",
crtc_state->hw.adjusted_mode.crtc_hdisplay,
crtc_state->dsc.slice_count);
drm_dbg_kms(&i915->drm, "VBT: DSC hdisplay %d not divisible by slice count %d\n",
crtc_state->hw.adjusted_mode.crtc_hdisplay,
crtc_state->dsc.slice_count);
/*
* The VBT rc_buffer_block_size and rc_buffer_size definitions
@ -3597,7 +3604,7 @@ int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
return translate_iboost(devdata->child.dp_iboost_level);
return translate_iboost(devdata->i915, devdata->child.dp_iboost_level);
}
int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
@ -3605,7 +3612,7 @@ int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
return translate_iboost(devdata->child.hdmi_iboost_level);
return translate_iboost(devdata->i915, devdata->child.hdmi_iboost_level);
}
int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)


@ -242,7 +242,8 @@ void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
const struct drm_edid *drm_edid);
void intel_bios_fini_panel(struct intel_panel *panel);
void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);


@ -63,6 +63,16 @@
* DMC will not change the active CDCLK frequency however, so that part
* will still be performed by the driver directly.
*
* Several methods exist to change the CDCLK frequency, which ones are
* supported depends on the platform:
*
* - Full PLL disable + re-enable with new VCO frequency. Pipes must be inactive.
* - CD2X divider update. Single pipe can be active as the divider update
* can be synchronized with the pipe's start of vblank.
* - Crawl the PLL smoothly to the new VCO frequency. Pipes can be active.
* - Squash waveform update. Pipes can be active.
* - Crawl and squash can also be done back to back. Pipes can be active.
*
* RAWCLK is a fixed frequency clock, often used by various auxiliary
* blocks such as AUX CH or backlight PWM. Hence the only thing we
* really need to know about RAWCLK is its frequency so that various
@ -1406,6 +1416,20 @@ static const struct intel_cdclk_vals lnl_cdclk_table[] = {
{}
};
static const int cdclk_squash_len = 16;
static int cdclk_squash_divider(u16 waveform)
{
return hweight16(waveform ?: 0xffff);
}
static int cdclk_divider(int cdclk, int vco, u16 waveform)
{
/* 2 * cd2x divider */
return DIV_ROUND_CLOSEST(vco * cdclk_squash_divider(waveform),
cdclk * cdclk_squash_len);
}
static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
@ -1744,10 +1768,10 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe
}
static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
int cdclk, int vco)
int cdclk, int vco, u16 waveform)
{
/* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
switch (cdclk_divider(cdclk, vco, waveform)) {
default:
drm_WARN_ON(&dev_priv->drm,
cdclk != dev_priv->display.cdclk.hw.bypass);
@ -1764,7 +1788,7 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
}
}
static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
static u16 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
int cdclk)
{
const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
@ -1826,20 +1850,13 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
return vco == ~0;
}
static const int cdclk_squash_len = 16;
static int cdclk_squash_divider(u16 waveform)
{
return hweight16(waveform ?: 0xffff);
}
static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
const struct intel_cdclk_config *old_cdclk_config,
const struct intel_cdclk_config *new_cdclk_config,
struct intel_cdclk_config *mid_cdclk_config)
{
u16 old_waveform, new_waveform, mid_waveform;
int div = 2;
int old_div, new_div, mid_div;
/* Return if PLL is in an unknown state, force a complete disable and re-enable. */
if (cdclk_pll_is_unknown(old_cdclk_config->vco))
@ -1858,6 +1875,18 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
old_waveform == new_waveform)
return false;
old_div = cdclk_divider(old_cdclk_config->cdclk,
old_cdclk_config->vco, old_waveform);
new_div = cdclk_divider(new_cdclk_config->cdclk,
new_cdclk_config->vco, new_waveform);
/*
* Should not happen currently. We might need more midpoint
* transitions if we need to also change the cd2x divider.
*/
if (drm_WARN_ON(&i915->drm, old_div != new_div))
return false;
*mid_cdclk_config = *new_cdclk_config;
/*
@ -1870,15 +1899,17 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
if (cdclk_squash_divider(new_waveform) > cdclk_squash_divider(old_waveform)) {
mid_cdclk_config->vco = old_cdclk_config->vco;
mid_div = old_div;
mid_waveform = new_waveform;
} else {
mid_cdclk_config->vco = new_cdclk_config->vco;
mid_div = new_div;
mid_waveform = old_waveform;
}
mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) *
mid_cdclk_config->vco,
cdclk_squash_len * div);
cdclk_squash_len * mid_div);
/* make sure the mid clock came out sane */
@ -1906,16 +1937,12 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
int unsquashed_cdclk;
u16 waveform;
u32 val;
waveform = cdclk_squash_waveform(i915, cdclk);
unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
cdclk_squash_divider(waveform));
val = bxt_cdclk_cd2x_div_sel(i915, unsquashed_cdclk, vco) |
val = bxt_cdclk_cd2x_div_sel(i915, cdclk, vco, waveform) |
bxt_cdclk_cd2x_pipe(i915, pipe);
/*
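
To ground the divider math above (the values are illustrative, not from a real
platform table): the full squash waveform 0xffff has all 16 bits set, so
cdclk_squash_divider() returns 16 and cdclk_divider() collapses to
DIV_ROUND_CLOSEST(vco, cdclk), the plain 2 * cd2x divider. Halving the number of
set bits halves the effective cdclk at the same VCO and cd2x setting:

cdclk_divider(576000, 1152000, 0xffff); /* 16/16 squash: returns 2, i.e. cd2x = 1 */
cdclk_divider(288000, 1152000, 0xaaaa); /* 8/16 squash: still 2, cdclk halved */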


@ -2111,7 +2111,8 @@ static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state)
return DISPLAY_INFO(i915)->color.degamma_lut_size;
}
static int check_lut_size(const struct drm_property_blob *lut, int expected)
static int check_lut_size(struct drm_i915_private *i915,
const struct drm_property_blob *lut, int expected)
{
int len;
@ -2120,8 +2121,8 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
len = drm_color_lut_size(lut);
if (len != expected) {
DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
len, expected);
drm_dbg_kms(&i915->drm, "Invalid LUT size; got %d, expected %d\n",
len, expected);
return -EINVAL;
}
@ -2146,8 +2147,8 @@ static int _check_luts(const struct intel_crtc_state *crtc_state,
degamma_length = intel_degamma_lut_size(crtc_state);
gamma_length = intel_gamma_lut_size(crtc_state);
if (check_lut_size(degamma_lut, degamma_length) ||
check_lut_size(gamma_lut, gamma_length))
if (check_lut_size(i915, degamma_lut, degamma_length) ||
check_lut_size(i915, gamma_lut, gamma_length))
return -EINVAL;
if (drm_color_lut_check(degamma_lut, degamma_tests) ||


@ -933,6 +933,9 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct i2c_adapter *ddc;
int ret;
if (!intel_display_driver_check_access(dev_priv))
return drm_edid_connector_add_modes(connector);
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);


@ -848,10 +848,10 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
.clock = 1000000, /* 10 Gbps */
.tx = { 0xbe21, /* tx cfg0 */
0x4800, /* tx cfg1 */
0xe800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = {0x0500, /* cmn cfg0*/
.cmn = {0x0700, /* cmn cfg0*/
0x0005, /* cmn cfg1 */
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
@ -1641,7 +1641,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
.clock = 3000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@ -1649,8 +1649,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
},
.mpllb = { 0x209c, /* mpllb cfg0 */
0x7d10, /* mpllb cfg1 */
.mpllb = { 0x309c, /* mpllb cfg0 */
0x2110, /* mpllb cfg1 */
0xca06, /* mpllb cfg2 */
0xbe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */
@ -1666,7 +1666,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
.clock = 6000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@ -1674,8 +1674,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
},
.mpllb = { 0x009c, /* mpllb cfg0 */
0x7d08, /* mpllb cfg1 */
.mpllb = { 0x109c, /* mpllb cfg0 */
0x2108, /* mpllb cfg1 */
0xca06, /* mpllb cfg2 */
0xbe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */
@ -1691,7 +1691,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
.clock = 8000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@ -1699,8 +1699,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
},
.mpllb = { 0x00d0, /* mpllb cfg0 */
0x7d08, /* mpllb cfg1 */
.mpllb = { 0x10d0, /* mpllb cfg0 */
0x2108, /* mpllb cfg1 */
0x4a06, /* mpllb cfg2 */
0xbe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */
@ -1716,7 +1716,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
.clock = 10000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@ -1725,7 +1725,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
0x0000, /* cmn cfg3 */
},
.mpllb = { 0x1104, /* mpllb cfg0 */
0x7d08, /* mpllb cfg1 */
0x2108, /* mpllb cfg1 */
0x0a06, /* mpllb cfg2 */
0xbe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */
@ -1741,7 +1741,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
.clock = 12000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
},
.cmn = { 0x0500, /* cmn cfg0*/
@ -1749,8 +1749,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
0x0000, /* cmn cfg2 */
0x0000, /* cmn cfg3 */
},
.mpllb = { 0x0138, /* mpllb cfg0 */
0x7d08, /* mpllb cfg1 */
.mpllb = { 0x1138, /* mpllb cfg0 */
0x2108, /* mpllb cfg1 */
0x5486, /* mpllb cfg2 */
0xfe40, /* mpllb cfg3 */
0x0000, /* mpllb cfg4 */

View file

@ -54,6 +54,7 @@
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dsi.h"
#include "intel_fdi.h"
@ -4150,7 +4151,7 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
crtc_state);
if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
if (intel_encoder_is_dp(encoder))
intel_dp_sync_state(encoder, crtc_state);
}

View file

@ -33,6 +33,7 @@
#include <linux/string_helpers.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
@ -73,6 +74,7 @@
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
@ -2478,7 +2480,7 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
bw_overhead);
u32 data_n = intel_dp_max_data_rate(link_clock, nlanes);
u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);
/*
* Windows/BIOS uses fixed M/N values always. Follow suit.
@ -4490,6 +4492,8 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
saved_state->crc_enabled = slave_crtc_state->crc_enabled;
intel_crtc_free_hw_state(slave_crtc_state);
if (slave_crtc_state->dp_tunnel_ref.tunnel)
drm_dp_tunnel_ref_put(&slave_crtc_state->dp_tunnel_ref);
memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
kfree(saved_state);
@ -4505,6 +4509,10 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
&master_crtc_state->hw.adjusted_mode);
slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
if (master_crtc_state->dp_tunnel_ref.tunnel)
drm_dp_tunnel_ref_get(master_crtc_state->dp_tunnel_ref.tunnel,
&slave_crtc_state->dp_tunnel_ref);
copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
@ -4533,6 +4541,8 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
/* free the old crtc_state->hw members */
intel_crtc_free_hw_state(crtc_state);
intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
* fixed, so that the crtc_state can be safely duplicated. For now,
@ -4851,10 +4861,12 @@ memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
}
static void
pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
bool fastset, const char *name,
pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc,
const char *name,
const u8 *a, const u8 *b, size_t len)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (fastset) {
if (!drm_debug_enabled(DRM_UT_KMS))
return;
@ -4863,7 +4875,8 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
len = memcmp_diff_len(a, b, len);
drm_dbg_kms(&dev_priv->drm,
"fastset requirement not met in %s buffer\n", name);
"[CRTC:%d:%s] fastset requirement not met in %s buffer\n",
crtc->base.base.id, crtc->base.name, name);
print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
16, 0, a, len, false);
print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
@ -4872,7 +4885,8 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
/* only dump up to the last difference */
len = memcmp_diff_len(a, b, len);
drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
crtc->base.base.id, crtc->base.name, name);
print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
16, 0, a, len, false);
print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
@ -4903,18 +4917,34 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
va_end(args);
}
static bool fastboot_enabled(struct drm_i915_private *dev_priv)
static void
pipe_config_pll_mismatch(bool fastset,
const struct intel_crtc *crtc,
const char *name,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
/* Enable fastboot by default on Skylake and newer */
if (DISPLAY_VER(dev_priv) >= 9)
return true;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* Enable fastboot by default on VLV and CHV */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return true;
if (fastset) {
if (!drm_debug_enabled(DRM_UT_KMS))
return;
/* Disabled by default on all others */
return false;
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] fastset requirement not met in %s\n",
crtc->base.base.id, crtc->base.name, name);
drm_dbg_kms(&i915->drm, "expected:\n");
intel_dpll_dump_hw_state(i915, a);
drm_dbg_kms(&i915->drm, "found:\n");
intel_dpll_dump_hw_state(i915, b);
} else {
drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
crtc->base.base.id, crtc->base.name, name);
drm_err(&i915->drm, "expected:\n");
intel_dpll_dump_hw_state(i915, a);
drm_err(&i915->drm, "found:\n");
intel_dpll_dump_hw_state(i915, b);
}
}
bool
@ -4925,14 +4955,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
bool ret = true;
bool fixup_inherited = fastset &&
current_config->inherited && !pipe_config->inherited;
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
"initial modeset and fastboot not set\n");
ret = false;
}
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
@ -5012,7 +5034,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
#define PIPE_CONF_CHECK_TIMINGS(name) do { \
#define PIPE_CONF_CHECK_PLL(name) do { \
if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
&pipe_config->name)) { \
pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \
&current_config->name, \
&pipe_config->name); \
ret = false; \
} \
} while (0)
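Expanded by hand, PIPE_CONF_CHECK_PLL(dpll_hw_state) becomes roughly the following (an illustrative expansion, not an addition to the patch), which is what allows the long list of per-register PIPE_CONF_CHECK_X(dpll_hw_state.*) lines to be dropped further down:

	if (!intel_dpll_compare_hw_state(dev_priv, &current_config->dpll_hw_state,
					 &pipe_config->dpll_hw_state)) {
		pipe_config_pll_mismatch(fastset, crtc, "dpll_hw_state",
					 &current_config->dpll_hw_state,
					 &pipe_config->dpll_hw_state);
		ret = false;
	}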
#define PIPE_CONF_CHECK_TIMINGS(name) do { \
PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
PIPE_CONF_CHECK_I(name.crtc_htotal); \
PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
@ -5071,7 +5103,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \
current_config->name, \
pipe_config->name, \
(len)); \
@ -5215,42 +5247,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(double_wide);
if (dev_priv->display.dpll.mgr) {
if (dev_priv->display.dpll.mgr)
PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
PIPE_CONF_CHECK_X(dpll_hw_state.spll);
PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
PIPE_CONF_CHECK_X(dpll_hw_state.div0);
PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
}
/* FIXME convert everything over the dpll_mgr */
if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
PIPE_CONF_CHECK_PLL(dpll_hw_state);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
@ -5373,6 +5375,10 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
if (ret)
return ret;
ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc);
if (ret)
return ret;
ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
if (ret)
return ret;
@ -6260,12 +6266,11 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_link_bw_limits new_limits;
struct intel_link_bw_limits old_limits;
int ret;
intel_link_bw_init_limits(i915, &new_limits);
intel_link_bw_init_limits(state, &new_limits);
old_limits = new_limits;
while (true) {
@ -7118,6 +7123,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_commit_modeset_disables(state);
intel_dp_tunnel_atomic_alloc_bw(state);
/* FIXME: Eventually get rid of our crtc->config pointer */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
crtc->config = new_crtc_state;
@ -8094,8 +8101,9 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915)
/* Kill all the work that may have been queued by hpd. */
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
if (connector->modeset_retry_work.func &&
cancel_work_sync(&connector->modeset_retry_work))
drm_connector_put(&connector->base);
if (connector->hdcp.shim) {
cancel_delayed_work_sync(&connector->hdcp.check_work);
cancel_work_sync(&connector->hdcp.prop_work);

View file

@ -524,6 +524,7 @@ struct intel_display {
} wq;
/* Grouping using named structs. Keep sorted. */
struct drm_dp_tunnel_mgr *dp_tunnel_mgr;
struct intel_audio audio;
struct intel_dpll dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];

View file

@ -188,7 +188,8 @@ static void intel_panel_info(struct seq_file *m,
}
static void intel_hdcp_info(struct seq_file *m,
struct intel_connector *intel_connector)
struct intel_connector *intel_connector,
bool remote_req)
{
bool hdcp_cap, hdcp2_cap;
@ -197,8 +198,14 @@ static void intel_hdcp_info(struct seq_file *m,
goto out;
}
hdcp_cap = intel_hdcp_capable(intel_connector);
hdcp2_cap = intel_hdcp2_capable(intel_connector);
if (remote_req) {
intel_hdcp_get_remote_capability(intel_connector,
&hdcp_cap,
&hdcp2_cap);
} else {
hdcp_cap = intel_hdcp_get_capability(intel_connector);
hdcp2_cap = intel_hdcp2_get_capability(intel_connector);
}
if (hdcp_cap)
seq_puts(m, "HDCP1.4 ");
@ -285,7 +292,11 @@ static void intel_connector_info(struct seq_file *m,
}
seq_puts(m, "\tHDCP version: ");
intel_hdcp_info(m, intel_connector);
if (intel_encoder_is_mst(encoder)) {
intel_hdcp_info(m, intel_connector, true);
seq_puts(m, "\tMST Hub HDCP version: ");
}
intel_hdcp_info(m, intel_connector, false);
seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
@ -1131,7 +1142,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
connector->base.base.id);
intel_hdcp_info(m, connector);
intel_hdcp_info(m, connector, false);
out:
drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
@ -1391,6 +1402,20 @@ out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
static int i915_bigjoiner_enable_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
struct drm_crtc *crtc;
crtc = connector->base.state->crtc;
if (connector->base.status != connector_status_connected || !crtc)
return -ENODEV;
seq_printf(m, "Bigjoiner enable: %d\n", connector->force_bigjoiner_enable);
return 0;
}
static ssize_t i915_dsc_output_format_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
@ -1412,6 +1437,30 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
return len;
}
static ssize_t i915_bigjoiner_enable_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = m->private;
struct drm_crtc *crtc;
bool bigjoiner_en = 0;
int ret;
crtc = connector->base.state->crtc;
if (connector->base.status != connector_status_connected || !crtc)
return -ENODEV;
ret = kstrtobool_from_user(ubuf, len, &bigjoiner_en);
if (ret < 0)
return ret;
connector->force_bigjoiner_enable = bigjoiner_en;
*offp += len;
return len;
}
static int i915_dsc_output_format_open(struct inode *inode,
struct file *file)
{
@ -1505,6 +1554,8 @@ static const struct file_operations i915_dsc_fractional_bpp_fops = {
.write = i915_dsc_fractional_bpp_write
};
DEFINE_SHOW_STORE_ATTRIBUTE(i915_bigjoiner_enable);
/*
* Returns the Current CRTC's bpc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@ -1586,6 +1637,13 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
connector, &i915_dsc_fractional_bpp_fops);
}
if (DISPLAY_VER(i915) >= 11 &&
(connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP)) {
debugfs_create_file("i915_bigjoiner_force_enable", 0644, root,
connector, &i915_bigjoiner_enable_fops);
}
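With this in place, bigjoiner can be forced from userspace with, e.g., echo 1 > /sys/kernel/debug/dri/0/<connector>/i915_bigjoiner_force_enable (path illustrative; the file is created under the connector's debugfs directory) and read back with cat; kstrtobool_from_user() accepts the usual 1/0, y/n and on/off spellings.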
if (connector_type == DRM_MODE_CONNECTOR_DSI ||
connector_type == DRM_MODE_CONNECTOR_eDP ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort ||

View file

@ -35,6 +35,7 @@
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_fb.h"
@ -434,10 +435,8 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
for_each_pipe(i915, pipe) {
ret = intel_crtc_init(i915, pipe);
if (ret) {
intel_mode_config_cleanup(i915);
return ret;
}
if (ret)
goto err_mode_config;
}
intel_plane_possible_crtcs_init(i915);
@ -457,6 +456,10 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_vga_disable(i915);
intel_setup_outputs(i915);
ret = intel_dp_tunnel_mgr_init(i915);
if (ret)
goto err_hdcp;
intel_display_driver_disable_user_access(i915);
drm_modeset_lock_all(dev);
@ -475,6 +478,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
ilk_wm_sanitize(i915);
return 0;
err_hdcp:
intel_hdcp_component_fini(i915);
err_mode_config:
intel_mode_config_cleanup(i915);
return ret;
}
/* part #3: call after gem init */
@ -599,6 +609,8 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
intel_mode_config_cleanup(i915);
intel_dp_tunnel_mgr_cleanup(i915);
intel_overlay_cleanup(i915);
intel_gmbus_teardown(i915);

View file

@ -33,6 +33,7 @@
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/display/drm_dsc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
@ -327,7 +328,6 @@ struct intel_vbt_panel_data {
struct edp_power_seq pps;
u8 drrs_msa_timing_delay;
bool low_vswing;
bool initialized;
bool hobl;
} edp;
@ -499,15 +499,15 @@ struct intel_hdcp_shim {
struct intel_connector *connector);
/* Detects panel's hdcp capability. This is optional for HDMI. */
int (*hdcp_capable)(struct intel_digital_port *dig_port,
bool *hdcp_capable);
int (*hdcp_get_capability)(struct intel_digital_port *dig_port,
bool *hdcp_capable);
/* HDCP adaptation(DP/HDMI) required on the port */
enum hdcp_wired_protocol protocol;
/* Detects whether sink is HDCP2.2 capable */
int (*hdcp_2_2_capable)(struct intel_connector *connector,
bool *capable);
int (*hdcp_2_2_get_capability)(struct intel_connector *connector,
bool *capable);
/* Write HDCP2.2 messages */
int (*write_2_2_msg)(struct intel_connector *connector,
@ -532,6 +532,10 @@ struct intel_hdcp_shim {
/* HDCP2.2 Link Integrity Check */
int (*check_2_2_link)(struct intel_digital_port *dig_port,
struct intel_connector *connector);
/* HDCP remote sink cap */
int (*get_remote_hdcp_capability)(struct intel_connector *connector,
bool *hdcp_capable, bool *hdcp2_capable);
};
struct intel_hdcp {
@ -626,6 +630,8 @@ struct intel_connector {
struct intel_dp *mst_port;
bool force_bigjoiner_enable;
struct {
struct drm_dp_aux *dsc_decompression_aux;
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
@ -677,6 +683,8 @@ struct intel_atomic_state {
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
struct intel_dp_tunnel_inherited_state *inherited_dp_tunnels;
/*
* Current watermarks can't be trusted during hardware readout, so
* don't bother calculating intermediate watermarks.
@ -1374,6 +1382,9 @@ struct intel_crtc_state {
struct drm_dsc_config config;
} dsc;
/* DP tunnel used for BW allocation. */
struct drm_dp_tunnel_ref dp_tunnel_ref;
/* HSW+ linetime watermarks */
u16 linetime;
u16 ips_linetime;
@ -1784,6 +1795,9 @@ struct intel_dp {
/* connector directly attached - won't be used for modeset in mst world */
struct intel_connector *attached_connector;
struct drm_dp_tunnel *tunnel;
bool tunnel_suspended:1;
/* mst connector list */
struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mst_mgr;

View file

@ -36,6 +36,7 @@
#include <asm/byteorder.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
@ -63,6 +64,7 @@
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_fifo_underrun.h"
@ -152,6 +154,22 @@ int intel_dp_link_symbol_clock(int rate)
return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}
static int max_dprx_rate(struct intel_dp *intel_dp)
{
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
}
static int max_dprx_lane_count(struct intel_dp *intel_dp)
{
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel);
return drm_dp_max_lane_count(intel_dp->dpcd);
}
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
intel_dp->sink_rates[0] = 162000;
@ -180,7 +198,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
/*
* Sink rates for 8b/10b.
*/
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
max_rate = max_dprx_rate(intel_dp);
max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
if (max_lttpr_rate)
max_rate = min(max_rate, max_lttpr_rate);
@ -259,7 +277,7 @@ static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp);
switch (intel_dp->max_sink_lane_count) {
case 1:
@ -309,7 +327,7 @@ static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
}
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
@ -326,7 +344,7 @@ static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
}
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
int source_max = intel_dp_max_source_lane_count(dig_port);
@ -383,50 +401,27 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
1000000 * 16 * 8);
}
/*
* Given a link rate and lanes, get the data bandwidth.
/**
* intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params
* @intel_dp: Intel DP object
* @max_dprx_rate: Maximum data rate of the DPRX
* @max_dprx_lanes: Maximum lane count of the DPRX
*
* Data bandwidth is the actual payload rate, which depends on the data
* bandwidth efficiency and the link rate.
* Calculate the maximum data rate for the provided link parameters taking into
* account any BW limitations by a DP tunnel attached to @intel_dp.
*
* For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
* is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
* 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
* coincidence, the port clock in kHz matches the data bandwidth in kBps, and
* they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
* longer holds for data bandwidth as soon as FEC or MST is taken into account!)
*
* For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
* example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
* kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
* does not match the symbol clock, the port clock (not even if you think in
* terms of a byte clock), nor the data bandwidth. It only matches the link bit
* rate in units of 10000 bps.
* Returns the maximum data rate in kBps units.
*/
int
intel_dp_max_data_rate(int max_link_rate, int max_lanes)
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
int max_dprx_rate, int max_dprx_lanes)
{
int ch_coding_efficiency =
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
int max_link_rate_kbps = max_link_rate * 10;
int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes);
/*
* UHBR rates always use 128b/132b channel encoding, and have
* 96.71% data bandwidth efficiency. Consider max_link_rate the
* link bit rate in units of 10000 bps.
*/
/*
* Lower than UHBR rates always use 8b/10b channel encoding, and have
* 80% data bandwidth efficiency for SST non-FEC. However, this turns
* out to be a nop by coincidence:
*
* int max_link_rate_kbps = max_link_rate * 10;
* max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
* max_link_rate = max_link_rate_kbps / 8;
*/
return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
ch_coding_efficiency),
1000000 * 8);
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
max_rate = min(max_rate,
drm_dp_tunnel_available_bw(intel_dp->tunnel));
return max_rate;
}
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
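As a sanity check of the efficiency math in the comment removed above, here is a standalone sketch (hypothetical helper, not part of the patch), using the ppm efficiency values drm_dp_bw_channel_coding_efficiency() reports, 800000 for 8b/10b and 967100 for 128b/132b:

static int example_dprx_data_rate(int link_rate, int lanes, bool uhbr)
{
	u32 eff_ppm = uhbr ? 967100 : 800000;

	/*
	 * link_rate is in 10 kbps units, result in kBps:
	 * 1.62 Gbps x 1 lane, 8b/10b:    162000 * 10 * 1 * 0.8 / 8    = 162000
	 * 10 Gbps x 4 lanes, 128b/132b: 1000000 * 10 * 4 * 0.9671 / 8 = 4835500
	 */
	return DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * 10 * lanes, eff_ppm),
				  1000000 * 8);
}

The first case reproduces the 162000 kBps figure from the removed comment; intel_dp_max_link_data_rate() then only clamps such a value further when a tunnel reports less available BW.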
@ -658,7 +653,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int mode_rate, max_rate;
mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
max_rate = intel_dp_max_data_rate(link_rate, lane_count);
max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count);
if (mode_rate > max_rate)
return false;
@ -1205,11 +1200,13 @@ bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
int hdisplay, int clock)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
if (!intel_dp_can_bigjoiner(intel_dp))
return false;
return clock > i915->max_dotclk_freq || hdisplay > 5120;
return clock > i915->max_dotclk_freq || hdisplay > 5120 ||
connector->force_bigjoiner_enable;
}
static enum drm_mode_status
@ -1260,7 +1257,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock,
intel_dp_mode_min_output_bpp(connector, mode));
@ -1610,8 +1608,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
link_avail = intel_dp_max_data_rate(link_rate,
lane_count);
link_avail = intel_dp_max_link_data_rate(intel_dp,
link_rate,
lane_count);
if (mode_rate <= link_avail) {
pipe_config->lane_count = lane_count;
@ -2387,6 +2387,17 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits);
}
int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp = crtc_state->dsc.compression_enable ?
to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
crtc_state->pipe_bpp;
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
}
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@ -2454,31 +2465,16 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
if (pipe_config->dsc.compression_enable) {
drm_dbg_kms(&i915->drm,
"DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
drm_dbg_kms(&i915->drm,
"DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
intel_dp_config_required_rate(pipe_config),
intel_dp_max_link_data_rate(intel_dp,
pipe_config->port_clock,
pipe_config->lane_count));
drm_dbg_kms(&i915->drm,
"DP link rate required %i available %i\n",
intel_dp_link_required(adjusted_mode->crtc_clock,
to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)),
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
} else {
drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp);
drm_dbg_kms(&i915->drm,
"DP link rate required %i available %i\n",
intel_dp_link_required(adjusted_mode->crtc_clock,
pipe_config->pipe_bpp),
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
}
return 0;
}
@ -2840,12 +2836,47 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
intel_dp_is_uhbr(pipe_config);
}
void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_connector_get(&connector->base);
if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
drm_connector_put(&connector->base);
}
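The get/put pairing here is load-bearing: queue_work() returns false when the work is already pending, in which case the duplicate reference is dropped on the spot; otherwise the reference is released either by the work function itself (see the drm_connector_put() added to intel_dp_modeset_retry_work_fn() below) or by the cancel_work_sync() path added to intel_hpd_poll_fini() above.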
void
intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_connector *connector;
struct intel_digital_connector_state *conn_state;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int i;
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);
return;
}
for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
if (!conn_state->base.crtc)
continue;
if (connector->mst_port == intel_dp)
intel_dp_queue_modeset_retry_work(connector);
}
}
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
const struct drm_display_mode *fixed_mode;
@ -2946,7 +2977,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
return 0;
return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
pipe_config);
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@ -3282,18 +3314,21 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (!crtc_state)
return;
bool dpcd_updated = false;
/*
* Don't clobber DPCD if it's been already read out during output
* setup (eDP) or detect.
*/
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) {
intel_dp_get_dpcd(intel_dp);
dpcd_updated = true;
}
intel_dp_reset_max_link_params(intel_dp);
intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
if (crtc_state)
intel_dp_reset_max_link_params(intel_dp);
}
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
@ -3959,6 +3994,13 @@ intel_dp_has_sink_count(struct intel_dp *intel_dp)
&intel_dp->desc);
}
void intel_dp_update_sink_caps(struct intel_dp *intel_dp)
{
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_max_sink_lane_count(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
@ -3975,9 +4017,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_max_sink_lane_count(intel_dp);
intel_dp_set_common_rates(intel_dp);
intel_dp_update_sink_caps(intel_dp);
}
if (intel_dp_has_sink_count(intel_dp)) {
@ -4087,73 +4127,6 @@ intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
return false;
}
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
struct dp_sdp *sdp, size_t size)
{
size_t length = sizeof(struct dp_sdp);
if (size < length)
return -ENOSPC;
memset(sdp, 0, size);
/*
* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
* VSC SDP Header Bytes
*/
sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
if (vsc->revision == 0x6) {
sdp->db[0] = 1;
sdp->db[3] = 1;
}
/*
* Revision 0x5 and revision 0x7 supports Pixel Encoding/Colorimetry
* Format as per DP 1.4a spec and DP 2.0 respectively.
*/
if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
goto out;
/* VSC SDP Payload for DB16 through DB18 */
/* Pixel Encoding and Colorimetry Formats */
sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
switch (vsc->bpc) {
case 6:
/* 6bpc: 0x0 */
break;
case 8:
sdp->db[17] = 0x1; /* DB17[3:0] */
break;
case 10:
sdp->db[17] = 0x2;
break;
case 12:
sdp->db[17] = 0x3;
break;
case 16:
sdp->db[17] = 0x4;
break;
default:
MISSING_CASE(vsc->bpc);
break;
}
/* Dynamic Range and Component Bit Depth */
if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
sdp->db[17] |= 0x80; /* DB17[7] */
/* Content Type */
sdp->db[18] = vsc->content_type & 0x7;
out:
return length;
}
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
const struct hdmi_drm_infoframe *drm_infoframe,
@ -4246,8 +4219,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
switch (type) {
case DP_SDP_VSC:
len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
sizeof(sdp));
len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp);
break;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
@ -4868,13 +4840,15 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
* - %true if pending interrupts were serviced (or no interrupts were
* pending) w/o detecting an error condition.
* - %false if an error condition - like AUX failure or a loss of link - is
* detected, which needs servicing from the hotplug work.
* detected, or another condition - like a DP tunnel BW state change - needs
* servicing from the hotplug work.
*/
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool link_ok = true;
bool reprobe_needed = false;
drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
@ -4901,6 +4875,13 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
intel_dp_mst_hpd_irq(intel_dp, esi, ack);
if (esi[3] & DP_TUNNELING_IRQ) {
if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
&intel_dp->aux))
reprobe_needed = true;
ack[3] |= DP_TUNNELING_IRQ;
}
if (!memchr_inv(ack, 0, sizeof(ack)))
break;
@ -4911,7 +4892,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
}
return link_ok;
return link_ok && !reprobe_needed;
}
static void
@ -5038,9 +5019,10 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
if (!crtc_state->hw.active)
continue;
if (conn_state->commit &&
!try_wait_for_completion(&conn_state->commit->hw_done))
continue;
if (conn_state->commit)
drm_WARN_ON(&i915->drm,
!wait_for_completion_timeout(&conn_state->commit->hw_done,
msecs_to_jiffies(5000)));
*pipe_mask |= BIT(crtc->pipe);
}
@ -5270,23 +5252,32 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool reprobe_needed = false;
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
return;
return false;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
return;
return false;
if ((val & DP_TUNNELING_IRQ) &&
drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
&intel_dp->aux))
reprobe_needed = true;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
return;
return reprobe_needed;
if (val & HDMI_LINK_STATUS_CHANGED)
intel_dp_handle_hdmi_link_status_change(intel_dp);
return reprobe_needed;
}
/*
@ -5307,6 +5298,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 old_sink_count = intel_dp->sink_count;
bool reprobe_needed = false;
bool ret;
/*
@ -5329,7 +5321,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
}
intel_dp_check_device_service_irq(intel_dp);
intel_dp_check_link_service_irq(intel_dp);
reprobe_needed = intel_dp_check_link_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@ -5356,10 +5348,10 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
* FIXME get rid of the ad-hoc phy test modeset code
* and properly incorporate it into the normal modeset.
*/
return false;
reprobe_needed = true;
}
return true;
return !reprobe_needed;
}
/* XXX this is probably wrong for multiple downstream ports */
@ -5669,6 +5661,7 @@ intel_dp_detect(struct drm_connector *connector,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
int ret;
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@ -5704,9 +5697,18 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp->is_mst);
}
intel_dp_tunnel_disconnect(intel_dp);
goto out;
}
ret = intel_dp_tunnel_detect(intel_dp, ctx);
if (ret == -EDEADLK)
return ret;
if (ret == 1)
intel_connector->base.epoch_counter++;
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
intel_dp_configure_mst(intel_dp);
@ -5737,8 +5739,6 @@ intel_dp_detect(struct drm_connector *connector,
* with an IRQ_HPD, so force a link status check.
*/
if (!intel_dp_is_edp(intel_dp)) {
int ret;
ret = intel_dp_retrain_link(encoder, ctx);
if (ret)
return ret;
@ -5878,6 +5878,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
intel_dp_mst_encoder_cleanup(dig_port);
intel_dp_tunnel_destroy(intel_dp);
intel_pps_vdd_off_sync(intel_dp);
/*
@ -5894,6 +5896,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
intel_pps_vdd_off_sync(intel_dp);
intel_dp_tunnel_suspend(intel_dp);
}
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
@ -6031,6 +6035,15 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return ret;
}
if (!intel_connector_needs_modeset(state, conn))
return 0;
ret = intel_dp_tunnel_atomic_check_state(state,
intel_dp,
intel_conn);
if (ret)
return ret;
/*
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
@ -6038,9 +6051,6 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (DISPLAY_VER(dev_priv) < 9)
return 0;
if (!intel_connector_needs_modeset(state, conn))
return 0;
if (conn->has_tile) {
ret = intel_modeset_tile_group(state, conn->tile_group->id);
if (ret)
@ -6097,6 +6107,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
@ -6119,6 +6130,17 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
dig_port->base.base.name,
long_hpd ? "long" : "short");
/*
* TBT DP tunnels require the GFX driver to read out the DPRX caps in
* response to long HPD pulses. The DP hotplug handler does that,
* however the hotplug handler may be blocked by another
* connector's/encoder's hotplug handler. Since the TBT CM may not
* complete the DP tunnel BW request for the latter connector/encoder
* waiting for this encoder's DPRX read, perform a dummy read here.
*/
if (long_hpd)
intel_dp_read_dprx_caps(intel_dp, dpcd);
if (long_hpd) {
intel_dp->reset_link_params = true;
return IRQ_NONE;
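Note that the result of the dummy read is deliberately discarded; the local dpcd buffer exists only so the AUX transaction takes place and unblocks the TBT Connection Manager, as the comment above explains. The real capability readout still happens later from the hotplug handler.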
@ -6439,6 +6461,14 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
mutex_unlock(&connector->dev->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
drm_kms_helper_connector_hotplug_event(connector);
drm_connector_put(connector);
}
void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
{
INIT_WORK(&connector->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
}
bool
@ -6455,8 +6485,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
int type;
/* Initialize the work for modeset in case of link train failure */
INIT_WORK(&intel_connector->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
intel_dp_init_modeset_retry_work(intel_connector);
if (drm_WARN(dev, dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",

View file

@ -43,6 +43,12 @@ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_dp_min_bpp(enum intel_output_format output_format);
void intel_dp_init_modeset_retry_work(struct intel_connector *connector);
void intel_dp_queue_modeset_retry_work(struct intel_connector *connector);
void
intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
bool intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@ -94,7 +100,11 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
int intel_dp_max_common_rate(struct intel_dp *intel_dp);
int intel_dp_max_common_lane_count(struct intel_dp *intel_dp);
void intel_dp_update_sink_caps(struct intel_dp *intel_dp);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
@ -105,7 +115,8 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead);
int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
int max_dprx_rate, int max_dprx_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);

View file

@ -36,8 +36,10 @@ static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
}
}
static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
static void intel_dp_hdcp_wait_for_cp_irq(struct intel_connector *connector,
int timeout)
{
struct intel_hdcp *hdcp = &connector->hdcp;
long ret;
#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
@ -45,7 +47,8 @@ static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
msecs_to_jiffies(timeout));
if (!ret)
DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
drm_dbg_kms(connector->base.dev,
"Timedout at waiting for CP_IRQ\n");
}
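As with the check_lut_size() change earlier in this commit, the bare DRM_DEBUG_KMS() becomes drm_dbg_kms(), which prefixes the message with the owning DRM device and keeps logs attributable on multi-device systems.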
static
@ -122,13 +125,13 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
}
static
int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux,
struct drm_i915_private *i915,
u8 *bcaps)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
ret = drm_dp_dpcd_read(aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
drm_dbg_kms(&i915->drm,
@ -143,10 +146,11 @@ static
int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
u8 bcaps;
ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
if (ret)
return ret;
@ -265,13 +269,14 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port,
}
static
int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
bool *hdcp_capable)
int intel_dp_hdcp_get_capability(struct intel_digital_port *dig_port,
bool *hdcp_capable)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
ssize_t ret;
u8 bcaps;
ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
if (ret)
return ret;
@ -330,23 +335,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
0, 0 },
};
static struct drm_dp_aux *
intel_dp_hdcp_get_aux(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
if (intel_encoder_is_mst(connector->encoder))
return &connector->port->aux;
else
return &dig_port->dp.aux;
}
static int
intel_dp_hdcp2_read_rx_status(struct intel_connector *connector,
u8 *rx_status)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
ret = drm_dp_dpcd_read(aux,
@ -387,7 +382,8 @@ int hdcp2_detect_msg_availability(struct intel_connector *connector,
*msg_ready = true;
break;
default:
DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
drm_err(connector->base.dev,
"Unidentified msg_id: %d\n", msg_id);
return -EINVAL;
}
@ -399,7 +395,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_dp *dp = &dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
int ret, timeout;
bool msg_ready = false;
@ -421,7 +419,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
* As we want to check the msg availability at timeout, Ignoring
* the timeout at wait for CP_IRQ.
*/
intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
intel_dp_hdcp_wait_for_cp_irq(connector, timeout);
ret = hdcp2_detect_msg_availability(connector, msg_id,
&msg_ready);
if (!msg_ready)
@ -454,8 +452,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_dp_aux *aux = &dig_port->dp.aux;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
struct drm_dp_aux *aux;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
@ -463,8 +462,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
offset = hdcp2_msg_data->offset;
aux = intel_dp_hdcp_get_aux(connector);
/* No msg_id in DP HDCP2.2 msgs */
bytes_to_write = size - 1;
byte++;
@ -490,7 +487,8 @@ static
ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector,
u32 *dev_cnt, u8 *byte)
{
struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
u8 *rx_info = byte;
@ -515,8 +513,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_dp_aux *aux;
struct drm_dp_aux *aux = &dig_port->dp.aux;
struct intel_dp *dp = &dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@ -530,8 +529,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
return -EINVAL;
offset = hdcp2_msg_data->offset;
aux = intel_dp_hdcp_get_aux(connector);
ret = intel_dp_hdcp2_wait_for_msg(connector, hdcp2_msg_data);
if (ret < 0)
return ret;
@ -561,13 +558,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
/* Entire msg read timeout since initiation of the msg read */
if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) {
if (intel_encoder_is_mst(connector->encoder))
msg_end = ktime_add_ms(ktime_get_raw(),
hdcp2_msg_data->msg_read_timeout *
connector->port->parent->num_ports);
else
msg_end = ktime_add_ms(ktime_get_raw(),
hdcp2_msg_data->msg_read_timeout);
msg_end = ktime_add_ms(ktime_get_raw(),
hdcp2_msg_data->msg_read_timeout);
}
ret = drm_dp_dpcd_read(aux, offset,
@ -648,25 +640,69 @@ int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port,
}
static
int intel_dp_hdcp2_capable(struct intel_connector *connector,
bool *capable)
int _intel_dp_hdcp2_get_capability(struct drm_dp_aux *aux,
bool *capable)
{
struct drm_dp_aux *aux;
u8 rx_caps[3];
int ret;
aux = intel_dp_hdcp_get_aux(connector);
int ret, i;
*capable = false;
ret = drm_dp_dpcd_read(aux,
DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
rx_caps, HDCP_2_2_RXCAPS_LEN);
if (ret != HDCP_2_2_RXCAPS_LEN)
return ret >= 0 ? -EIO : ret;
if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
*capable = true;
/*
* Some HDCP monitors act really shady by not giving the correct hdcp
* capability on the first rx_caps read and usually take an extra read
* to give the capability. We read rx_caps three times before we
* declare a monitor not capable of HDCP 2.2.
*/
for (i = 0; i < 3; i++) {
ret = drm_dp_dpcd_read(aux,
DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
rx_caps, HDCP_2_2_RXCAPS_LEN);
if (ret != HDCP_2_2_RXCAPS_LEN)
return ret >= 0 ? -EIO : ret;
if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) {
*capable = true;
break;
}
}
return 0;
}
static
int intel_dp_hdcp2_get_capability(struct intel_connector *connector,
bool *capable)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_dp_aux *aux = &dig_port->dp.aux;
return _intel_dp_hdcp2_get_capability(aux, capable);
}
static
int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
bool *hdcp_capable,
bool *hdcp2_capable)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct drm_dp_aux *aux = &connector->port->aux;
u8 bcaps;
int ret;
if (!intel_encoder_is_mst(connector->encoder))
return -EINVAL;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
return ret;
ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
if (ret)
return ret;
*hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
return 0;
}
@ -682,12 +718,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
.toggle_signalling = intel_dp_hdcp_toggle_signalling,
.check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
.hdcp_get_capability = intel_dp_hdcp_get_capability,
.write_2_2_msg = intel_dp_hdcp2_write_msg,
.read_2_2_msg = intel_dp_hdcp2_read_msg,
.config_stream_type = intel_dp_hdcp2_config_stream_type,
.check_2_2_link = intel_dp_hdcp2_check_link,
.hdcp_2_2_capable = intel_dp_hdcp2_capable,
.hdcp_2_2_get_capability = intel_dp_hdcp2_get_capability,
.protocol = HDCP_PROTOCOL_DP,
};
@ -812,13 +848,14 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.toggle_signalling = intel_dp_hdcp_toggle_signalling,
.stream_encryption = intel_dp_mst_hdcp_stream_encryption,
.check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
.hdcp_get_capability = intel_dp_hdcp_get_capability,
.write_2_2_msg = intel_dp_hdcp2_write_msg,
.read_2_2_msg = intel_dp_hdcp2_read_msg,
.config_stream_type = intel_dp_hdcp2_config_stream_type,
.stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
.check_2_2_link = intel_dp_mst_hdcp2_check_link,
.hdcp_2_2_capable = intel_dp_hdcp2_capable,
.hdcp_2_2_get_capability = intel_dp_hdcp2_get_capability,
.get_remote_hdcp_capability = intel_dp_hdcp_get_remote_capability,
.protocol = HDCP_PROTOCOL_DP,
};

View file

@ -162,6 +162,28 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
return lttpr_count;
}
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (intel_dp_is_edp(intel_dp))
return 0;
/*
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))
if (drm_dp_dpcd_probe(&intel_dp->aux,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
return -EIO;
if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
return -EIO;
return 0;
}
/**
* intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
* @intel_dp: Intel DP struct
@ -192,12 +214,10 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp) &&
(DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
u8 dpcd[DP_RECEIVER_CAP_SIZE];
int err = intel_dp_read_dprx_caps(intel_dp, dpcd);
if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
return -EIO;
if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
return -EIO;
if (err != 0)
return err;
lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
}
@ -1075,7 +1095,6 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
@ -1093,7 +1112,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
}
/* Schedule a Hotplug Uevent to userspace to start modeset */
queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
intel_dp_queue_modeset_retry_work(intel_connector);
}
/* Perform the link training on all LTTPRs and the DPRX on a link. */

View file

@ -11,6 +11,7 @@
struct intel_crtc_state;
struct intel_dp;
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,

View file

@ -42,6 +42,7 @@
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@ -523,6 +524,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
const struct intel_connector *connector =
@ -619,7 +621,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
return 0;
return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
pipe_config);
}
/*
@ -876,6 +879,14 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
if (intel_connector_needs_modeset(state, connector)) {
ret = intel_dp_tunnel_atomic_check_state(state,
intel_connector->mst_port,
intel_connector);
if (ret)
return ret;
}
return drm_dp_atomic_release_time_slots(&state->base,
&intel_connector->mst_port->mst_mgr,
intel_connector->port);
@ -1197,6 +1208,7 @@ static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
struct intel_dp *intel_dp = intel_connector->mst_port;
const struct drm_edid *drm_edid;
int ret;
@ -1204,6 +1216,9 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
if (drm_connector_is_unregistered(connector))
return intel_connector_update_modes(connector, NULL);
if (!intel_display_driver_check_access(i915))
return drm_edid_connector_add_modes(connector);
drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
ret = intel_connector_update_modes(connector, drm_edid);
@ -1295,7 +1310,8 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
max_rate = intel_dp_max_link_data_rate(intel_dp,
max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, min_bpp);
ret = drm_modeset_lock(&mgr->base.lock, ctx);
@ -1542,6 +1558,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_connector->port = port;
drm_dp_mst_get_port_malloc(port);
intel_dp_init_modeset_retry_work(intel_connector);
intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
intel_connector->dp.dsc_hblank_expansion_quirk =

View file

@ -0,0 +1,811 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "i915_drv.h"
#include <drm/display/drm_dp_tunnel.h>
#include "intel_atomic.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_link_bw.h"
struct intel_dp_tunnel_inherited_state {
struct drm_dp_tunnel_ref ref[I915_MAX_PIPES];
};
/**
* intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port
* @intel_dp: DP port object the tunnel is connected to
*
* Disconnect a DP tunnel from @intel_dp, destroying any related state. This
* should be called after detecting a sink-disconnect event from the port.
*/
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp)
{
drm_dp_tunnel_destroy(intel_dp->tunnel);
intel_dp->tunnel = NULL;
}
/**
* intel_dp_tunnel_destroy - Destroy a DP tunnel
* @intel_dp: DP port object the tunnel is connected to
*
* Destroy a DP tunnel connected to @intel_dp, after disabling the BW
* allocation mode on the tunnel. This should be called while destroying the
* port.
*/
void intel_dp_tunnel_destroy(struct intel_dp *intel_dp)
{
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);
intel_dp_tunnel_disconnect(intel_dp);
}
static int kbytes_to_mbits(int kbytes)
{
return DIV_ROUND_UP(kbytes * 8, 1000);
}
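/*
 * Worked example (for illustration only, not used by the driver): a
 * 4-lane DP link at HBR3 (8.1 Gbps/lane) carries 32.4 Gbps raw, i.e.
 * 25.92 Gbps of payload after 8b/10b channel coding, or 3240000 kB/s.
 * kbytes_to_mbits(3240000) == DIV_ROUND_UP(3240000 * 8, 1000) == 25920.
 */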
static int get_current_link_bw(struct intel_dp *intel_dp,
bool *below_dprx_bw)
{
int rate = intel_dp_max_common_rate(intel_dp);
int lane_count = intel_dp_max_common_lane_count(intel_dp);
int bw;
bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
*below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count);
return bw;
}
static int update_tunnel_state(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool old_bw_below_dprx;
bool new_bw_below_dprx;
int old_bw;
int new_bw;
int ret;
old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);
ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
if (ret < 0) {
drm_dbg_kms(&i915->drm,
"[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
ERR_PTR(ret));
return ret;
}
if (ret == 0 ||
!drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
return 0;
intel_dp_update_sink_caps(intel_dp);
new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);
/* Suppress the notification if the mode list can't change due to BW. */
if (old_bw_below_dprx == new_bw_below_dprx &&
!new_bw_below_dprx)
return 0;
drm_dbg_kms(&i915->drm,
"[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));
return 1;
}
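/*
 * Note added for clarity: the return convention above is 0 if nothing
 * user-visible changed, 1 if the usable BW changed in a way that can
 * alter the connector's mode list - so user space must be notified -
 * and a negative error code on failure.
 */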
/*
* Allocate the BW for a tunnel on a DP connector/port if the connector/port
* was already active when detecting the tunnel. The allocated BW must be
* freed by the next atomic modeset, storing the BW in the
* intel_atomic_state::inherited_dp_tunnels, and calling
* intel_dp_tunnel_atomic_free_bw().
*/
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_crtc *crtc;
int tunnel_bw = 0;
int err;
for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int stream_bw = intel_dp_config_required_rate(crtc_state);
tunnel_bw += stream_bw;
drm_dbg_kms(&i915->drm,
"[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
crtc->pipe,
kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw));
}
err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
if (err) {
drm_dbg_kms(&i915->drm,
"[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
ERR_PTR(err));
return err;
}
return update_tunnel_state(intel_dp);
}
static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx)
{
u8 pipe_mask;
int err;
err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
if (err)
return err;
return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
}
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_dp_tunnel *tunnel;
int ret;
tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr,
&intel_dp->aux);
if (IS_ERR(tunnel))
return PTR_ERR(tunnel);
intel_dp->tunnel = tunnel;
ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
if (ret) {
if (ret == -EOPNOTSUPP)
return 0;
drm_dbg_kms(&i915->drm,
"[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
ERR_PTR(ret));
/* Keep the tunnel with BWA disabled */
return 0;
}
ret = allocate_initial_tunnel_bw(intel_dp, ctx);
if (ret < 0)
intel_dp_tunnel_destroy(intel_dp);
return ret;
}
/**
* intel_dp_tunnel_detect - Detect a DP tunnel on a port
* @intel_dp: DP port object
* @ctx: lock context acquired by the connector detection handler
*
* Detect a DP tunnel on the @intel_dp port, enabling the BW allocation mode
* on it if supported and allocating the BW required on an already active port.
* The BW allocated this way must be freed by the next atomic modeset calling
* intel_dp_tunnel_atomic_free_bw().
*
* If @intel_dp already has a tunnel detected on it, update the tunnel's state
* wrt. its support for BW allocation mode and the available BW via the
* tunnel. If the tunnel's state change requires this - for instance the
* tunnel's group ID has changed - the tunnel will be dropped and recreated.
*
* Return 0 in case of success - after any tunnel is detected and added to
* @intel_dp - or 1 in case the BW on an already existing tunnel has changed
* in a way that requires notifying user space.
*/
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
int ret;
if (intel_dp_is_edp(intel_dp))
return 0;
if (intel_dp->tunnel) {
ret = update_tunnel_state(intel_dp);
if (ret >= 0)
return ret;
/* Try to recreate the tunnel after an update error. */
intel_dp_tunnel_destroy(intel_dp);
}
return detect_new_tunnel(intel_dp, ctx);
}
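/*
 * Hypothetical usage sketch, added for illustration; the caller below is
 * an assumption and not part of this change: a connector detect handler
 * would call intel_dp_tunnel_detect() and turn a positive return value
 * into a user space notification.
 */
static int example_detect_tunnel(struct intel_dp *intel_dp,
				 struct drm_modeset_acquire_ctx *ctx)
{
	int ret = intel_dp_tunnel_detect(intel_dp, ctx);

	if (ret < 0)
		return ret;	/* detection or state update failed */

	if (ret > 0) {
		/* Usable BW changed: notify user space, e.g. via a hotplug uevent. */
	}

	return 0;
}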
/**
* intel_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation support on a tunnel
* @intel_dp: DP port object
*
* Query whether a DP tunnel is connected on @intel_dp and the tunnel supports
* the BW allocation mode.
*
* Returns %true if the BW allocation mode is supported on @intel_dp.
*/
bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
{
return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel);
}
/**
* intel_dp_tunnel_suspend - Suspend a DP tunnel connected on a port
* @intel_dp: DP port object
*
* Suspend a DP tunnel on @intel_dp with BW allocation mode enabled on it.
*/
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return;
drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);
intel_dp->tunnel_suspended = true;
}
/**
* intel_dp_tunnel_resume - Resume a DP tunnel connected on a port
* @intel_dp: DP port object
* @crtc_state: CRTC state
* @dpcd_updated: the DPCD DPRX capabilities got updated during resume
*
* Resume a DP tunnel on @intel_dp with BW allocation mode enabled on it.
*/
void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool dpcd_updated)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 pipe_mask;
int err = 0;
if (!intel_dp->tunnel_suspended)
return;
intel_dp->tunnel_suspended = false;
drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
/*
* The TBT Connection Manager requires the GFX driver to read out
* the sink's DPRX caps to be able to service any BW requests later.
* During resume, overwriting the caps cached in @intel_dp before
* suspend must be avoided, so only do a dummy read here, unless the
* capabilities were already updated during resume.
*/
if (!dpcd_updated) {
err = intel_dp_read_dprx_caps(intel_dp, dpcd);
if (err) {
drm_dp_tunnel_set_io_error(intel_dp->tunnel);
goto out_err;
}
}
err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
if (err)
goto out_err;
pipe_mask = 0;
if (crtc_state) {
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
/* TODO: Add support for MST */
pipe_mask |= BIT(crtc->pipe);
}
err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
if (err < 0)
goto out_err;
return;
out_err:
drm_dbg_kms(&i915->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redect it (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
ERR_PTR(err));
}
static struct drm_dp_tunnel *
get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
if (!state->inherited_dp_tunnels)
return NULL;
return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
}
static int
add_inherited_tunnel(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct drm_dp_tunnel *old_tunnel;
old_tunnel = get_inherited_tunnel(state, crtc);
if (old_tunnel) {
drm_WARN_ON(&i915->drm, old_tunnel != tunnel);
return 0;
}
if (!state->inherited_dp_tunnels) {
state->inherited_dp_tunnels = kzalloc(sizeof(*state->inherited_dp_tunnels),
GFP_KERNEL);
if (!state->inherited_dp_tunnels)
return -ENOMEM;
}
drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);
return 0;
}
static int check_inherited_tunnel_state(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_digital_connector_state *old_conn_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector =
to_intel_connector(old_conn_state->base.connector);
struct intel_crtc *old_crtc;
const struct intel_crtc_state *old_crtc_state;
/*
* If a BWA tunnel gets detected only after the corresponding connector
* got enabled already - either without a BWA tunnel, or with a different
* BWA tunnel that was removed meanwhile - the old CRTC state won't
* contain the state of the current tunnel. Such a tunnel still has BW
* reserved for it, which must be released; add the state for these
* inherited tunnels separately, only to this atomic state.
*/
if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return 0;
if (!old_conn_state->base.crtc)
return 0;
old_crtc = to_intel_crtc(old_conn_state->base.crtc);
old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);
if (!old_crtc_state->hw.active ||
old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
return 0;
drm_dbg_kms(&i915->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
old_crtc->base.base.id, old_crtc->base.name,
intel_dp->tunnel);
return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
}
/**
* intel_dp_tunnel_atomic_cleanup_inherited_state - Free any inherited DP tunnel state
* @state: Atomic state
*
* Free the inherited DP tunnel state in @state.
*/
void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
{
enum pipe pipe;
if (!state->inherited_dp_tunnels)
return;
for_each_pipe(to_i915(state->base.dev), pipe)
if (state->inherited_dp_tunnels->ref[pipe].tunnel)
drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);
kfree(state->inherited_dp_tunnels);
state->inherited_dp_tunnels = NULL;
}
static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
u32 pipe_mask;
int err;
err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
tunnel, &pipe_mask);
if (err)
return err;
drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}
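/*
 * Note added for clarity: all streams in a tunnel group share the group's
 * BW, so an overallocation may require changing the link configuration of
 * any pipe in the group. Adding every pipe of the group to the atomic
 * state up front allows the link BW check to reduce their bpp together.
 */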
/**
* intel_dp_tunnel_atomic_add_state_for_crtc - Add CRTC specific DP tunnel state
* @state: Atomic state
* @crtc: CRTC to add the tunnel state for
*
* Add the DP tunnel state for @crtc if the CRTC (aka DP tunnel stream) is enabled
* via a DP tunnel.
*
* Return 0 in case of success, a negative error code otherwise.
*/
int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct drm_dp_tunnel_state *tunnel_state;
struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;
if (!tunnel)
return 0;
tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
if (IS_ERR(tunnel_state))
return PTR_ERR(tunnel_state);
return 0;
}
static int check_group_state(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
struct intel_connector *connector,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->dp_tunnel_ref.tunnel)
return 0;
drm_dbg_kms(&i915->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
crtc_state->dp_tunnel_ref.tunnel);
return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel);
}
/**
* intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel specific state
* @state: Atomic state
* @intel_dp: DP port object
* @connector: connector using @intel_dp
*
* Check and add the DP tunnel atomic state for @intel_dp/@connector to
* @state, if there is a DP tunnel detected on @intel_dp with BW allocation
* mode enabled on it, or if @intel_dp/@connector was previously enabled via a
* DP tunnel.
*
* Returns 0 in case of success, or a negative error code otherwise.
*/
int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
struct intel_connector *connector)
{
const struct intel_digital_connector_state *old_conn_state =
intel_atomic_get_old_connector_state(state, connector);
const struct intel_digital_connector_state *new_conn_state =
intel_atomic_get_new_connector_state(state, connector);
int err;
if (old_conn_state->base.crtc) {
err = check_group_state(state, intel_dp, connector,
to_intel_crtc(old_conn_state->base.crtc));
if (err)
return err;
}
if (new_conn_state->base.crtc &&
new_conn_state->base.crtc != old_conn_state->base.crtc) {
err = check_group_state(state, intel_dp, connector,
to_intel_crtc(new_conn_state->base.crtc));
if (err)
return err;
}
return check_inherited_tunnel_state(state, intel_dp, old_conn_state);
}
/**
* intel_dp_tunnel_atomic_compute_stream_bw - Compute the BW required by a DP tunnel stream
* @state: Atomic state
* @intel_dp: DP object
* @connector: connector using @intel_dp
* @crtc_state: state of CRTC of the given DP tunnel stream
*
* Compute the required BW of CRTC (aka DP tunnel stream), storing this BW to
* the DP tunnel state containing the stream in @state. Before re-calculating a
* BW requirement in @crtc_state, the old BW requirement computed by this
* function must be cleared by calling intel_dp_tunnel_atomic_clear_stream_bw().
*
* Returns 0 in case of success, a negative error code otherwise.
*/
int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_connector *connector,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int required_rate = intel_dp_config_required_rate(crtc_state);
int ret;
if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return 0;
drm_dbg_kms(&i915->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
crtc->pipe,
kbytes_to_mbits(required_rate));
ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
crtc->pipe, required_rate);
if (ret < 0)
return ret;
drm_dp_tunnel_ref_get(intel_dp->tunnel,
&crtc_state->dp_tunnel_ref);
return 0;
}
/**
* intel_dp_tunnel_atomic_clear_stream_bw - Clear any DP tunnel stream BW requirement
* @state: Atomic state
* @crtc_state: state of CRTC of the given DP tunnel stream
*
* Clear any DP tunnel stream BW requirement set by
* intel_dp_tunnel_atomic_compute_stream_bw().
*/
void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (!crtc_state->dp_tunnel_ref.tunnel)
return;
drm_dp_tunnel_atomic_set_stream_bw(&state->base,
crtc_state->dp_tunnel_ref.tunnel,
crtc->pipe, 0);
drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
}
/**
* intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state
* @state: intel atomic state
* @limits: link BW limits
*
* Check the link configuration for all DP tunnels in @state. If the
* configuration is invalid @limits will be updated if possible to
* reduce the total BW, after which the configuration for all CRTCs in
* @state must be recomputed with the updated @limits.
*
* Returns:
* - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @limits got updated
* with fallback values with which the configuration of all CRTCs in
* @state must be recomputed
* - Other negative error, if the configuration is invalid without a
* fallback possibility, or the check failed for another reason
*/
int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits)
{
u32 failed_stream_mask;
int err;
err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
&failed_stream_mask);
if (err != -ENOSPC)
return err;
err = intel_link_bw_reduce_bpp(state, limits,
failed_stream_mask, "DP tunnel link BW");
return err ? : -EAGAIN;
}
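/*
 * Sketch of the expected caller pattern, added for illustration (the
 * function below is hypothetical): recompute the CRTC configurations
 * with the reduced @limits until the tunnel BW check passes or fails
 * without a fallback.
 */
static int example_check_tunnel_bw(struct intel_atomic_state *state,
				   struct intel_link_bw_limits *limits)
{
	int ret;

	for (;;) {
		/* ...recompute the CRTC states in @state using @limits... */

		ret = intel_dp_tunnel_atomic_check_link(state, limits);
		if (ret != -EAGAIN)
			return ret;

		/* @limits got reduced, recompute with the new values. */
	}
}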
static void atomic_decrease_bw(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_dp_tunnel_state *new_tunnel_state;
struct drm_dp_tunnel *tunnel;
int old_bw;
int new_bw;
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
tunnel = get_inherited_tunnel(state, crtc);
if (!tunnel)
tunnel = old_crtc_state->dp_tunnel_ref.tunnel;
if (!tunnel)
continue;
old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);
new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);
if (new_bw >= old_bw)
continue;
drm_dp_tunnel_alloc_bw(tunnel, new_bw);
}
}
static void queue_retry_work(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_encoder *encoder;
encoder = intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_digital_port_connected(encoder))
return;
drm_dbg_kms(&i915->drm,
"[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
drm_dp_tunnel_name(tunnel),
encoder->base.base.id,
encoder->base.name);
intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
}
static void atomic_increase_bw(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
const struct intel_crtc_state *crtc_state;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_dp_tunnel_state *tunnel_state;
struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
int bw;
if (!intel_crtc_needs_modeset(crtc_state))
continue;
if (!tunnel)
continue;
tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);
if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
queue_retry_work(state, tunnel, crtc_state);
}
}
/**
* intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
* @state: Atomic state
*
* Allocate the required BW for all tunnels in @state.
*/
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
{
atomic_decrease_bw(state);
atomic_increase_bw(state);
}
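/*
 * Note added for clarity: decreasing the allocations first frees up BW
 * within each tunnel group, making it more likely that the subsequent
 * increases for other tunnels in the same group can be satisfied from
 * the group's available BW.
 */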
/**
* intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
* @i915: i915 device object
*
* Initialize the DP tunnel manager. The tunnel manager will support the
* detection/management of DP tunnels on all DP connectors, so the function
* must be called after all these connectors have already been registered.
*
* Return 0 in case of success, a negative error code otherwise.
*/
int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
{
struct drm_dp_tunnel_mgr *tunnel_mgr;
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector;
int dp_connectors = 0;
drm_connector_list_iter_begin(&i915->drm, &connector_list_iter);
for_each_intel_connector_iter(connector, &connector_list_iter) {
if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
dp_connectors++;
}
drm_connector_list_iter_end(&connector_list_iter);
tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors);
if (IS_ERR(tunnel_mgr))
return PTR_ERR(tunnel_mgr);
i915->display.dp_tunnel_mgr = tunnel_mgr;
return 0;
}
/**
* intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
* @i915: i915 device object
*
* Clean up the DP tunnel manager state.
*/
void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915)
{
drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr);
i915->display.dp_tunnel_mgr = NULL;
}

View file

@ -0,0 +1,133 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_DP_TUNNEL_H__
#define __INTEL_DP_TUNNEL_H__
#include <linux/errno.h>
#include <linux/types.h>
struct drm_i915_private;
struct drm_connector_state;
struct drm_modeset_acquire_ctx;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
struct intel_encoder;
struct intel_link_bw_limits;
#if defined(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915)
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx);
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp);
void intel_dp_tunnel_destroy(struct intel_dp *intel_dp);
void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool dpcd_updated);
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp);
bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp);
void
intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state);
int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_connector *connector,
struct intel_crtc_state *crtc_state);
void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state);
int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits);
int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
struct intel_connector *connector);
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state);
int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915);
void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915);
#else
static inline int
intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
return -EOPNOTSUPP;
}
static inline void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp) {}
static inline void intel_dp_tunnel_destroy(struct intel_dp *intel_dp) {}
static inline void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool dpcd_updated) {}
static inline void intel_dp_tunnel_suspend(struct intel_dp *intel_dp) {}
static inline bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
{
return false;
}
static inline void
intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state) {}
static inline int
intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_connector *connector,
struct intel_crtc_state *crtc_state)
{
return 0;
}
static inline void
intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state) {}
static inline int
intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
return 0;
}
static inline int
intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits)
{
return 0;
}
static inline int
intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
struct intel_connector *connector)
{
return 0;
}
static inline void
intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state) {}
static inline int
intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
{
return 0;
}
static inline void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) {}
#endif /* CONFIG_DRM_I915_DP_TUNNEL */
#endif /* __INTEL_DP_TUNNEL_H__ */

View file

@ -109,6 +109,8 @@ struct intel_dpll_mgr {
void (*update_ref_clks)(struct drm_i915_private *i915);
void (*dump_hw_state)(struct drm_i915_private *i915,
const struct intel_dpll_hw_state *hw_state);
bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
};
static void
@ -644,6 +646,15 @@ static void ibx_dump_hw_state(struct drm_i915_private *i915,
hw_state->fp1);
}
static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
return a->dpll == b->dpll &&
a->dpll_md == b->dpll_md &&
a->fp0 == b->fp0 &&
a->fp1 == b->fp1;
}
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
.enable = ibx_pch_dpll_enable,
.disable = ibx_pch_dpll_disable,
@ -662,6 +673,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
.get_dplls = ibx_get_dpll,
.put_dplls = intel_put_dpll,
.dump_hw_state = ibx_dump_hw_state,
.compare_hw_state = ibx_compare_hw_state,
};
static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
@ -1220,6 +1232,13 @@ static void hsw_dump_hw_state(struct drm_i915_private *i915,
hw_state->wrpll, hw_state->spll);
}
static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
return a->wrpll == b->wrpll &&
a->spll == b->spll;
}
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
@ -1278,6 +1297,7 @@ static const struct intel_dpll_mgr hsw_pll_mgr = {
.put_dplls = intel_put_dpll,
.update_ref_clks = hsw_update_dpll_ref_clks,
.dump_hw_state = hsw_dump_hw_state,
.compare_hw_state = hsw_compare_hw_state,
};
struct skl_dpll_regs {
@ -1929,6 +1949,14 @@ static void skl_dump_hw_state(struct drm_i915_private *i915,
hw_state->cfgcr2);
}
static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
return a->ctrl1 == b->ctrl1 &&
a->cfgcr1 == b->cfgcr1 &&
a->cfgcr2 == b->cfgcr2;
}
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
@ -1959,6 +1987,7 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
.put_dplls = intel_put_dpll,
.update_ref_clks = skl_update_dpll_ref_clks,
.dump_hw_state = skl_dump_hw_state,
.compare_hw_state = skl_compare_hw_state,
};
static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
@ -2392,6 +2421,21 @@ static void bxt_dump_hw_state(struct drm_i915_private *i915,
hw_state->pcsdw12);
}
static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
return a->ebb0 == b->ebb0 &&
a->ebb4 == b->ebb4 &&
a->pll0 == b->pll0 &&
a->pll1 == b->pll1 &&
a->pll2 == b->pll2 &&
a->pll3 == b->pll3 &&
a->pll6 == b->pll6 &&
a->pll8 == b->pll8 &&
a->pll10 == b->pll10 &&
a->pcsdw12 == b->pcsdw12;
}
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
@ -2413,6 +2457,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
.put_dplls = intel_put_dpll,
.update_ref_clks = bxt_update_dpll_ref_clks,
.dump_hw_state = bxt_dump_hw_state,
.compare_hw_state = bxt_compare_hw_state,
};
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
@ -4005,6 +4050,25 @@ static void icl_dump_hw_state(struct drm_i915_private *i915,
hw_state->mg_pll_tdc_coldst_bias);
}
static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
/* FIXME split combo vs. mg more thoroughly */
return a->cfgcr0 == b->cfgcr0 &&
a->cfgcr1 == b->cfgcr1 &&
a->div0 == b->div0 &&
a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
a->mg_pll_div0 == b->mg_pll_div0 &&
a->mg_pll_div1 == b->mg_pll_div1 &&
a->mg_pll_lf == b->mg_pll_lf &&
a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
a->mg_pll_ssc == b->mg_pll_ssc &&
a->mg_pll_bias == b->mg_pll_bias &&
a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
}
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
@ -4046,6 +4110,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.update_active_dpll = icl_update_active_dpll,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
.compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info ehl_plls[] = {
@ -4063,6 +4128,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
.compare_hw_state = icl_compare_hw_state,
};
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
@ -4094,6 +4160,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.update_active_dpll = icl_update_active_dpll,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
.compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info rkl_plls[] = {
@ -4110,6 +4177,7 @@ static const struct intel_dpll_mgr rkl_pll_mgr = {
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
.compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info dg1_plls[] = {
@ -4127,6 +4195,7 @@ static const struct intel_dpll_mgr dg1_pll_mgr = {
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
.compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info adls_plls[] = {
@ -4144,6 +4213,7 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
.compare_hw_state = icl_compare_hw_state,
};
static const struct dpll_info adlp_plls[] = {
@ -4166,6 +4236,7 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
.update_active_dpll = icl_update_active_dpll,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
.compare_hw_state = icl_compare_hw_state,
};
/**
@ -4458,13 +4529,31 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
/* fallback for platforms that don't use the shared dpll
* infrastructure
*/
drm_dbg_kms(&i915->drm,
"dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
"fp0: 0x%x, fp1: 0x%x\n",
hw_state->dpll,
hw_state->dpll_md,
hw_state->fp0,
hw_state->fp1);
ibx_dump_hw_state(i915, hw_state);
}
}
/**
* intel_dpll_compare_hw_state - compare the two states
* @i915: i915 drm device
* @a: first DPLL hw state
* @b: second DPLL hw state
*
* Compare DPLL hw states @a and @b.
*
* Returns: true if the states are equal, false if they differ
*/
bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b)
{
if (i915->display.dpll.mgr) {
return i915->display.dpll.mgr->compare_hw_state(a, b);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
*/
return ibx_compare_hw_state(a, b);
}
}
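/*
 * Hypothetical usage sketch, added for illustration: a HW state checker
 * could compare the state read back from the registers against the
 * software-computed state and flag any mismatch.
 */
static void example_verify_dpll_state(struct drm_i915_private *i915,
				      const struct intel_dpll_hw_state *sw,
				      const struct intel_dpll_hw_state *hw)
{
	if (!intel_dpll_compare_hw_state(i915, sw, hw))
		drm_err(&i915->drm, "DPLL HW state mismatch\n");
}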

View file

@ -378,6 +378,9 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915);
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
const struct intel_dpll_hw_state *hw_state);
bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);

View file

@ -299,6 +299,7 @@ void intel_drrs_crtc_init(struct intel_crtc *crtc)
static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state;
int ret;
@ -310,6 +311,11 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
mutex_lock(&crtc->drrs.mutex);
seq_printf(m, "DRRS capable: %s\n",
str_yes_no(crtc_state->has_drrs ||
HAS_DOUBLE_BUFFERED_M_N(i915) ||
intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
seq_printf(m, "DRRS enabled: %s\n",
str_yes_no(crtc_state->has_drrs));

View file

@ -325,7 +325,7 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
unsigned int latency = skl_watermark_max_latency(i915);
unsigned int latency = skl_watermark_max_latency(i915, 0);
int vblank_start;
if (crtc_state->vrr.enable) {

View file

@ -57,9 +57,6 @@ struct intel_dsi {
u16 phys; /* ICL DSI */
};
/* if true, use HS mode, otherwise LP */
bool hs;
/* virtual channel */
int channel;
@ -93,7 +90,6 @@ struct intel_dsi {
bool bgr_enabled;
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;

View file

@ -30,6 +30,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "i915_drv.h"
#include "i915_reg.h"
@ -338,8 +339,12 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
static int intel_dvo_get_modes(struct drm_connector *_connector)
{
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
int num_modes;
if (!intel_display_driver_check_access(i915))
return drm_edid_connector_add_modes(&connector->base);
/*
* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip

View file

@ -53,12 +53,6 @@ struct intel_dvo_dev_ops {
bool (*init)(struct intel_dvo_device *dvo,
struct i2c_adapter *i2cbus);
/*
* Called to allow the output a chance to create properties after the
* RandR objects have been created.
*/
void (*create_resources)(struct intel_dvo_device *dvo);
/*
* Turn on/off output.
*
@ -79,16 +73,6 @@ struct intel_dvo_dev_ops {
enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode);
/*
* Callback for preparing mode changes on an output
*/
void (*prepare)(struct intel_dvo_device *dvo);
/*
* Callback for committing mode changes on an output
*/
void (*commit)(struct intel_dvo_device *dvo);
/*
* Callback for setting up a video mode after fixups have been made.
*
@ -111,15 +95,6 @@ struct intel_dvo_dev_ops {
*/
bool (*get_hw_state)(struct intel_dvo_device *dev);
/**
* Query the device for the modes it provides.
*
* This function may also update MonInfo, mm_width, and mm_height.
*
* \return singly-linked list of modes or NULL if no modes found.
*/
struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
/**
* Clean up driver-specific bits of the output
*/

View file

@ -1849,9 +1849,10 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
fb->modifier, rotation);
if (stride > max_stride) {
DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
fb->base.id, stride,
plane->base.base.id, plane->base.name, max_stride);
drm_dbg_kms(plane->base.dev,
"[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
fb->base.id, stride,
plane->base.base.id, plane->base.name, max_stride);
return -EINVAL;
}

View file

@ -37,11 +37,11 @@ struct intel_global_obj {
(__i)++) \
for_each_if(obj)
#define for_each_old_global_obj_in_state(__state, obj, new_obj_state, __i) \
#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->num_global_objs && \
((obj) = (__state)->global_objs[__i].ptr, \
(new_obj_state) = (__state)->global_objs[__i].old_state, 1); \
(old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
(__i)++) \
for_each_if(obj)

View file

@ -30,7 +30,7 @@
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
static int intel_conn_to_vcpi(struct drm_atomic_state *state,
static int intel_conn_to_vcpi(struct intel_atomic_state *state,
struct intel_connector *connector)
{
struct drm_dp_mst_topology_mgr *mgr;
@ -43,7 +43,7 @@ static int intel_conn_to_vcpi(struct drm_atomic_state *state,
return 0;
mgr = connector->port->mgr;
drm_modeset_lock(&mgr->base.lock, state->acquire_ctx);
drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
if (drm_WARN_ON(mgr->dev, !payload))
@ -68,19 +68,51 @@ out:
* DP MST topology. Though it is not compulsory, security fw should change its
* policy to mark different content_types for different streams.
*/
static void
intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
static int
intel_hdcp_required_content_stream(struct intel_atomic_state *state,
struct intel_digital_port *dig_port)
{
struct drm_connector_list_iter conn_iter;
struct intel_digital_port *conn_dig_port;
struct intel_connector *connector;
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
bool enforce_type0 = false;
int k;
if (dig_port->hdcp_auth_status)
return;
return 0;
data->k = 0;
if (!dig_port->hdcp_mst_type1_capable)
enforce_type0 = true;
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.status == connector_status_disconnected)
continue;
if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
continue;
conn_dig_port = intel_attached_dig_port(connector);
if (conn_dig_port != dig_port)
continue;
data->streams[data->k].stream_id =
intel_conn_to_vcpi(state, connector);
data->k++;
/* if there is only one active stream */
if (dig_port->dp.active_mst_links <= 1)
break;
}
drm_connector_list_iter_end(&conn_iter);
if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
return -EINVAL;
/*
* Apply common protection level across all streams in DP MST Topology.
* Use highest supported content type for all streams in DP MST Topology.
@ -88,19 +120,25 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
for (k = 0; k < data->k; k++)
data->streams[k].stream_type =
enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
return 0;
}
static void intel_hdcp_prepare_streams(struct intel_connector *connector)
static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
data->streams[0].stream_type = hdcp->content_type;
} else {
intel_hdcp_required_content_stream(dig_port);
}
if (intel_encoder_is_mst(intel_attached_encoder(connector)))
return intel_hdcp_required_content_stream(state, dig_port);
data->k = 1;
data->streams[0].stream_id = 0;
data->streams[0].stream_type = hdcp->content_type;
return 0;
}
static
@ -140,7 +178,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
}
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
bool intel_hdcp_get_capability(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
@ -150,8 +188,8 @@ bool intel_hdcp_capable(struct intel_connector *connector)
if (!shim)
return capable;
if (shim->hdcp_capable) {
shim->hdcp_capable(dig_port, &capable);
if (shim->hdcp_get_capability) {
shim->hdcp_get_capability(dig_port, &capable);
} else {
if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
capable = true;
@ -160,12 +198,14 @@ bool intel_hdcp_capable(struct intel_connector *connector)
return capable;
}
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
/*
* Check if the source has all the building blocks ready to make
* HDCP 2.2 work
*/
static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
bool capable = false;
/* I915 support for HDCP2.2 */
if (!hdcp->hdcp2_supported)
@ -185,12 +225,40 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
}
mutex_unlock(&i915->display.hdcp.hdcp_mutex);
return true;
}
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_get_capability(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
bool capable = false;
if (!intel_hdcp2_prerequisite(connector))
return false;
/* Sink's capability for HDCP2.2 */
hdcp->shim->hdcp_2_2_capable(connector, &capable);
hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
return capable;
}
void intel_hdcp_get_remote_capability(struct intel_connector *connector,
bool *hdcp_capable,
bool *hdcp2_capable)
{
struct intel_hdcp *hdcp = &connector->hdcp;
if (!hdcp->shim->get_remote_hdcp_capability)
return;
hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
hdcp2_capable);
if (!intel_hdcp2_prerequisite(connector))
*hdcp2_capable = false;
}
static bool intel_hdcp_in_use(struct drm_i915_private *i915,
enum transcoder cpu_transcoder, enum port port)
{
@ -726,8 +794,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
* whether the display supports HDCP before we write An. For HDMI
* displays, this is not necessary.
*/
if (shim->hdcp_capable) {
ret = shim->hdcp_capable(dig_port, &hdcp_capable);
if (shim->hdcp_get_capability) {
ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
if (ret)
return ret;
if (!hdcp_capable) {
@ -1058,15 +1126,9 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
ret = intel_hdcp1_enable(connector);
if (ret) {
drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED,
true);
goto out;
}
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED,
true);
out:
mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
@ -1871,7 +1933,8 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
return ret;
}
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
@ -1880,7 +1943,13 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
intel_hdcp_prepare_streams(connector);
ret = intel_hdcp_prepare_streams(state, connector);
if (ret) {
drm_dbg_kms(&i915->drm,
"Prepare stream failed.(%d)\n",
ret);
break;
}
ret = hdcp2_propagate_stream_management_info(connector);
if (ret) {
@ -1925,7 +1994,8 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
return ret;
}
static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_enable(struct intel_atomic_state *state,
struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
@ -1935,7 +2005,7 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
connector->base.base.id, connector->base.name,
hdcp->content_type);
ret = hdcp2_authenticate_and_encrypt(connector);
ret = hdcp2_authenticate_and_encrypt(state, connector);
if (ret) {
drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
hdcp->content_type, ret);
@ -2038,17 +2108,6 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
drm_dbg_kms(&i915->drm,
"HDCP2.2 Downstream topology change\n");
ret = hdcp2_authenticate_repeater_topology(connector);
if (!ret) {
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED,
true);
goto out;
}
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
connector->base.base.id, connector->base.name,
ret);
} else {
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
@ -2065,18 +2124,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
ret = _intel_hdcp2_enable(connector);
if (ret) {
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] Failed to enable hdcp2.2 (%d)\n",
connector->base.base.id, connector->base.name,
ret);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED,
true);
goto out;
}
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
@ -2284,52 +2333,6 @@ int intel_hdcp_init(struct intel_connector *connector,
return 0;
}
static int
intel_hdcp_set_streams(struct intel_digital_port *dig_port,
struct intel_atomic_state *state)
{
struct drm_connector_list_iter conn_iter;
struct intel_digital_port *conn_dig_port;
struct intel_connector *connector;
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
if (!intel_encoder_is_mst(&dig_port->base)) {
data->k = 1;
data->streams[0].stream_id = 0;
return 0;
}
data->k = 0;
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.status == connector_status_disconnected)
continue;
if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
continue;
conn_dig_port = intel_attached_dig_port(connector);
if (conn_dig_port != dig_port)
continue;
data->streams[data->k].stream_id =
intel_conn_to_vcpi(&state->base, connector);
data->k++;
/* if there is only one active stream */
if (dig_port->dp.active_mst_links <= 1)
break;
}
drm_connector_list_iter_end(&conn_iter);
if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
return -EINVAL;
return 0;
}
static int _intel_hdcp_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@ -2374,25 +2377,18 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
* is capable of HDCP2.2, it is preferred to use HDCP2.2.
*/
if (intel_hdcp2_capable(connector)) {
ret = intel_hdcp_set_streams(dig_port, state);
if (!ret) {
ret = _intel_hdcp2_enable(connector);
if (!ret)
check_link_interval =
DRM_HDCP2_CHECK_PERIOD_MS;
} else {
drm_dbg_kms(&i915->drm,
"Set content streams failed: (%d)\n",
ret);
}
if (intel_hdcp2_get_capability(connector)) {
ret = _intel_hdcp2_enable(state, connector);
if (!ret)
check_link_interval =
DRM_HDCP2_CHECK_PERIOD_MS;
}
/*
* When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
* be attempted.
*/
if (ret && intel_hdcp_capable(connector) &&
if (ret && intel_hdcp_get_capability(connector) &&
hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
ret = intel_hdcp1_enable(connector);
}

View file

@ -38,8 +38,11 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct drm_i915_private *i915, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
bool intel_hdcp2_capable(struct intel_connector *connector);
bool intel_hdcp_get_capability(struct intel_connector *connector);
bool intel_hdcp2_get_capability(struct intel_connector *connector);
void intel_hdcp_get_remote_capability(struct intel_connector *connector,
bool *hdcp_capable,
bool *hdcp2_capable);
void intel_hdcp_component_init(struct drm_i915_private *i915);
void intel_hdcp_component_fini(struct drm_i915_private *i915);
void intel_hdcp_cleanup(struct intel_connector *connector);

View file

@ -1732,8 +1732,8 @@ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port,
}
static
int intel_hdmi_hdcp2_capable(struct intel_connector *connector,
bool *capable)
int intel_hdmi_hdcp2_get_capability(struct intel_connector *connector,
bool *capable)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
u8 hdcp2_version;
@ -1762,7 +1762,7 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.write_2_2_msg = intel_hdmi_hdcp2_write_msg,
.read_2_2_msg = intel_hdmi_hdcp2_read_msg,
.check_2_2_link = intel_hdmi_hdcp2_check_link,
.hdcp_2_2_capable = intel_hdmi_hdcp2_capable,
.hdcp_2_2_get_capability = intel_hdmi_hdcp2_get_capability,
.protocol = HDCP_PROTOCOL_HDMI,
};

View file

@ -6,26 +6,41 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
/**
* intel_link_bw_init_limits - initialize BW limits
* @i915: device instance
* @state: Atomic state
* @limits: link BW limits
*
* Initialize @limits.
*/
void intel_link_bw_init_limits(struct drm_i915_private *i915, struct intel_link_bw_limits *limits)
void intel_link_bw_init_limits(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe;
limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(i915, pipe)
limits->max_bpp_x16[pipe] = INT_MAX;
for_each_pipe(i915, pipe) {
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state,
intel_crtc_for_pipe(i915, pipe));
if (state->base.duplicated && crtc_state) {
limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
if (crtc_state->fec_enable)
limits->force_fec_pipes |= BIT(pipe);
} else {
limits->max_bpp_x16[pipe] = INT_MAX;
}
}
}
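/*
 * Note added for clarity: a duplicated atomic state replays a
 * configuration that was computed earlier, so the limits are seeded from
 * that configuration's per-pipe values instead of being reset, keeping
 * the recheck consistent with the original commit.
 */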
/**
@ -149,6 +164,10 @@ static int check_all_link_config(struct intel_atomic_state *state,
if (ret)
return ret;
ret = intel_dp_tunnel_atomic_check_link(state, limits);
if (ret)
return ret;
ret = intel_fdi_atomic_check_link(state, limits);
if (ret)
return ret;

View file

@ -22,7 +22,7 @@ struct intel_link_bw_limits {
int max_bpp_x16[I915_MAX_PIPES];
};
void intel_link_bw_init_limits(struct drm_i915_private *i915,
void intel_link_bw_init_limits(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits);
int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits,

View file

@ -887,7 +887,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
return ret;
}
if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
if (intel_bios_is_valid_vbt(dev_priv, fw->data, fw->size)) {
opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (opregion->vbt_firmware) {
drm_dbg_kms(&dev_priv->drm,
@ -1034,7 +1034,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
drm_dbg_kms(&dev_priv->drm,
"Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
@ -1059,7 +1059,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt_size = (mboxes & MBOX_ASLE_EXT) ?
OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
vbt_size -= OPREGION_VBT_OFFSET;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
drm_dbg_kms(&dev_priv->drm,
"Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;

View file

@ -252,6 +252,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct i2c_msg msgs[] = {
{
.addr = intel_sdvo->slave_addr,
@ -271,7 +272,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
return true;
DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
drm_dbg_kms(&i915->drm, "i2c transfer returned %d\n", ret);
return false;
}
@ -437,7 +438,8 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
drm_dbg_kms(&dev_priv->drm, "%s: W: %02X %s\n", SDVO_NAME(intel_sdvo),
cmd, buffer);
}
static const char * const cmd_status_names[] = {
@ -462,6 +464,7 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len,
bool unlocked)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
u8 *buf, status;
struct i2c_msg *msgs;
int i, ret = true;
@ -511,13 +514,13 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
else
ret = __i2c_transfer(intel_sdvo->i2c, msgs, i+3);
if (ret < 0) {
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
drm_dbg_kms(&i915->drm, "I2c transfer returned %d\n", ret);
ret = false;
goto out;
}
if (ret != i+3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
drm_dbg_kms(&i915->drm, "I2c transfer returned %d/%d\n", ret, i+3);
ret = false;
}
@ -604,12 +607,13 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
drm_dbg_kms(&dev_priv->drm, "%s: R: %s\n",
SDVO_NAME(intel_sdvo), buffer);
return true;
log_fail:
DRM_DEBUG_KMS("%s: R: ... failed %s\n",
SDVO_NAME(intel_sdvo), buffer);
drm_dbg_kms(&dev_priv->drm, "%s: R: ... failed %s\n",
SDVO_NAME(intel_sdvo), buffer);
return false;
}
@ -758,7 +762,7 @@ static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
}
static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
struct intel_sdvo_dtd *dtd)
{
return intel_sdvo_set_timing(intel_sdvo,
SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
@ -926,8 +930,8 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
BUILD_BUG_ON(sizeof(encode) != 2);
return intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPP_ENCODE,
&encode, sizeof(encode));
SDVO_CMD_GET_SUPP_ENCODE,
&encode, sizeof(encode));
}
static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
@ -1004,6 +1008,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
unsigned int if_index, u8 tx_rate,
const u8 *data, unsigned int length)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
u8 set_buf_index[2] = { if_index, 0 };
u8 hbuf_size, tmp[8];
int i;
@ -1016,8 +1021,9 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
drm_dbg_kms(&i915->drm,
"writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
if (hbuf_size < length)
return false;
@ -1042,6 +1048,7 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
unsigned int if_index,
u8 *data, unsigned int length)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
u8 set_buf_index[2] = { if_index, 0 };
u8 hbuf_size, tx_rate, av_split;
int i;
@ -1071,8 +1078,9 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
drm_dbg_kms(&i915->drm,
"reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
hbuf_size = min_t(unsigned int, length, hbuf_size);
@ -1151,6 +1159,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
ssize_t len;
@ -1162,7 +1171,7 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
sdvo_data, sizeof(sdvo_data));
if (len < 0) {
DRM_DEBUG_KMS("failed to read AVI infoframe\n");
drm_dbg_kms(&i915->drm, "failed to read AVI infoframe\n");
return;
} else if (len == 0) {
return;
@ -1173,13 +1182,14 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
ret = hdmi_infoframe_unpack(frame, sdvo_data, len);
if (ret) {
DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
drm_dbg_kms(&i915->drm, "Failed to unpack AVI infoframe\n");
return;
}
if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI)
DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
drm_dbg_kms(&i915->drm,
"Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
}
static void intel_sdvo_get_eld(struct intel_sdvo *intel_sdvo,
@ -1348,6 +1358,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
@ -1360,7 +1371,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
drm_dbg_kms(&i915->drm, "forcing bpc to 8 for SDVO\n");
/* FIXME: Don't increase pipe_bpp */
pipe_config->pipe_bpp = 8*3;
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
@ -1439,7 +1450,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
pipe_config, conn_state)) {
DRM_DEBUG_KMS("bad AVI infoframe\n");
drm_dbg_kms(&i915->drm, "bad AVI infoframe\n");
return -EINVAL;
}
@ -1916,8 +1927,8 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
*/
if (success && !input1) {
drm_dbg_kms(&dev_priv->drm,
"First %s output reported failure to "
"sync\n", SDVO_NAME(intel_sdvo));
"First %s output reported failure to sync\n",
SDVO_NAME(intel_sdvo));
}
if (0)
@ -1976,37 +1987,38 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
BUILD_BUG_ON(sizeof(*caps) != 8);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_DEVICE_CAPS,
caps, sizeof(*caps)))
return false;
DRM_DEBUG_KMS("SDVO capabilities:\n"
" vendor_id: %d\n"
" device_id: %d\n"
" device_rev_id: %d\n"
" sdvo_version_major: %d\n"
" sdvo_version_minor: %d\n"
" sdvo_num_inputs: %d\n"
" smooth_scaling: %d\n"
" sharp_scaling: %d\n"
" up_scaling: %d\n"
" down_scaling: %d\n"
" stall_support: %d\n"
" output_flags: %d\n",
caps->vendor_id,
caps->device_id,
caps->device_rev_id,
caps->sdvo_version_major,
caps->sdvo_version_minor,
caps->sdvo_num_inputs,
caps->smooth_scaling,
caps->sharp_scaling,
caps->up_scaling,
caps->down_scaling,
caps->stall_support,
caps->output_flags);
drm_dbg_kms(&i915->drm, "SDVO capabilities:\n"
" vendor_id: %d\n"
" device_id: %d\n"
" device_rev_id: %d\n"
" sdvo_version_major: %d\n"
" sdvo_version_minor: %d\n"
" sdvo_num_inputs: %d\n"
" smooth_scaling: %d\n"
" sharp_scaling: %d\n"
" up_scaling: %d\n"
" down_scaling: %d\n"
" stall_support: %d\n"
" output_flags: %d\n",
caps->vendor_id,
caps->device_id,
caps->device_rev_id,
caps->sdvo_version_major,
caps->sdvo_version_minor,
caps->sdvo_num_inputs,
caps->smooth_scaling,
caps->sharp_scaling,
caps->up_scaling,
caps->down_scaling,
caps->stall_support,
caps->output_flags);
return true;
}
@ -2038,7 +2050,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&hotplug, sizeof(hotplug)))
&hotplug, sizeof(hotplug)))
return 0;
return hotplug;
@ -2121,8 +2133,9 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
bool monitor_is_digital = drm_edid_is_digital(drm_edid);
bool connector_is_digital = !!IS_DIGITAL(sdvo);
DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
connector_is_digital, monitor_is_digital);
drm_dbg_kms(sdvo->base.base.dev,
"connector_is_digital? %d, monitor_is_digital? %d\n",
connector_is_digital, monitor_is_digital);
return connector_is_digital == monitor_is_digital;
}
@ -2135,8 +2148,8 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret;
u16 response;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
@ -2153,9 +2166,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
&response, 2))
return connector_status_unknown;
DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
response & 0xff, response >> 8,
intel_sdvo_connector->output_flag);
drm_dbg_kms(&i915->drm, "SDVO response %d %d [%x]\n",
response & 0xff, response >> 8,
intel_sdvo_connector->output_flag);
if (response == 0)
return connector_status_disconnected;
@ -2189,11 +2202,15 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
int num_modes = 0;
const struct drm_edid *drm_edid;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_driver_check_access(i915))
return drm_edid_connector_add_modes(connector);
/* set the bus switch and get the modes */
drm_edid = intel_sdvo_get_edid(connector);
@ -2287,6 +2304,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
const struct drm_connector_state *conn_state = connector->state;
@ -2295,8 +2313,11 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
int num_modes = 0;
int i;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_driver_check_access(i915))
return 0;
/*
* Read the list of supported input resolutions for the selected TV
@ -2783,10 +2804,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
DRM_DEBUG_KMS("initialising DVI type 0x%x\n", type);
drm_dbg_kms(&i915->drm, "initialising DVI type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@ -2797,7 +2819,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
if (intel_sdvo_get_hotplug_support(intel_sdvo) &
intel_sdvo_connector->output_flag) {
intel_sdvo_connector->output_flag) {
intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
/*
* Some SDVO devices have one-shot hotplug interrupts.
@ -2832,12 +2854,13 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
static bool
intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, u16 type)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
DRM_DEBUG_KMS("initialising TV type 0x%x\n", type);
drm_dbg_kms(&i915->drm, "initialising TV type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@ -2871,12 +2894,13 @@ err:
static bool
intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
DRM_DEBUG_KMS("initialising analog type 0x%x\n", type);
drm_dbg_kms(&i915->drm, "initialising analog type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@ -2908,7 +2932,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
DRM_DEBUG_KMS("initialising LVDS type 0x%x\n", type);
drm_dbg_kms(&i915->drm, "initialising LVDS type 0x%x\n", type);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
@ -2992,6 +3016,7 @@ static bool intel_sdvo_output_init(struct intel_sdvo *sdvo, u16 type)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
static const u16 probe_order[] = {
SDVO_OUTPUT_TMDS0,
SDVO_OUTPUT_TMDS1,
@ -3010,8 +3035,9 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
flags = intel_sdvo_filter_output_flags(intel_sdvo->caps.output_flags);
if (flags == 0) {
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%04x)\n",
SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags);
drm_dbg_kms(&i915->drm,
"%s: Unknown SDVO output type (0x%04x)\n",
SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags);
return false;
}
@ -3073,8 +3099,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->tv_format =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
"mode", intel_sdvo_connector->format_supported_num);
drm_property_create(dev, DRM_MODE_PROP_ENUM,
"mode", intel_sdvo_connector->format_supported_num);
if (!intel_sdvo_connector->tv_format)
return false;
@ -3100,8 +3126,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
state_assignment = response; \
drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, 0); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
data_value[0], data_value[1], response); \
drm_dbg_kms(dev, #name ": max %d, default %d, current %d\n", \
data_value[0], data_value[1], response); \
} \
} while (0)
@ -3112,6 +3138,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
struct drm_connector_state *conn_state = connector->state;
@ -3148,10 +3175,9 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->right, 0);
DRM_DEBUG_KMS("h_overscan: max %d, "
"default %d, current %d\n",
data_value[0], data_value[1], response);
intel_sdvo_connector->right, 0);
drm_dbg_kms(&i915->drm, "h_overscan: max %d, default %d, current %d\n",
data_value[0], data_value[1], response);
}
if (enhancements.overscan_v) {
@ -3170,7 +3196,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_vscan = data_value[0];
intel_sdvo_connector->top =
drm_property_create_range(dev, 0,
"top_margin", 0, data_value[0]);
"top_margin", 0, data_value[0]);
if (!intel_sdvo_connector->top)
return false;
@ -3179,15 +3205,14 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->bottom =
drm_property_create_range(dev, 0,
"bottom_margin", 0, data_value[0]);
"bottom_margin", 0, data_value[0]);
if (!intel_sdvo_connector->bottom)
return false;
drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom, 0);
DRM_DEBUG_KMS("v_overscan: max %d, "
"default %d, current %d\n",
data_value[0], data_value[1], response);
intel_sdvo_connector->bottom, 0);
drm_dbg_kms(&i915->drm, "v_overscan: max %d, default %d, current %d\n",
data_value[0], data_value[1], response);
}
ENHANCEMENT(&sdvo_state->tv, hpos, HPOS);
@ -3215,7 +3240,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl, 0);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
drm_dbg_kms(&i915->drm, "dot crawl: current %d\n", response);
}
return true;
@ -3240,6 +3265,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector)
{
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
union {
struct intel_sdvo_enhancements_reply reply;
u16 response;
@ -3251,7 +3277,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
&enhancements, sizeof(enhancements)) ||
enhancements.response == 0) {
DRM_DEBUG_KMS("No enhancement is supported\n");
drm_dbg_kms(&i915->drm, "No enhancement is supported\n");
return true;
}
@ -3471,23 +3497,23 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
goto err_output;
drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"num inputs: %d, "
"output 1: %c, output 2: %c\n",
SDVO_NAME(intel_sdvo),
intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
intel_sdvo->caps.device_rev_id,
intel_sdvo->pixel_clock_min / 1000,
intel_sdvo->pixel_clock_max / 1000,
intel_sdvo->caps.sdvo_num_inputs,
/* check currently supported outputs */
intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 |
SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 |
SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N',
intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 |
SDVO_OUTPUT_LVDS1) ? 'Y' : 'N');
"clock range %dMHz - %dMHz, "
"num inputs: %d, "
"output 1: %c, output 2: %c\n",
SDVO_NAME(intel_sdvo),
intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
intel_sdvo->caps.device_rev_id,
intel_sdvo->pixel_clock_min / 1000,
intel_sdvo->pixel_clock_max / 1000,
intel_sdvo->caps.sdvo_num_inputs,
/* check currently supported outputs */
intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 |
SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 |
SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N',
intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 |
SDVO_OUTPUT_LVDS1) ? 'Y' : 'N');
return true;
err_output:

View file

@ -948,6 +948,11 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(dev_priv) == 13)
plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
if (GRAPHICS_VER(dev_priv) >= 20 &&
fb->modifier == I915_FORMAT_MOD_4_TILED) {
plane_ctl |= PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
}
return plane_ctl;
}

View file

@ -23,6 +23,12 @@
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
/* It is expected that DSB can do posted writes to every register in
* the pipe and planes within 100us. For the flip queue use case, the
* recommended DSB execution time is 100us + one SAGV block time.
*/
#define DSB_EXE_TIME 100
static void skl_sagv_disable(struct drm_i915_private *i915);
/* Stores plane specific WM parameters */
@ -2904,12 +2910,51 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return 0;
}
/*
* If Fixed Refresh Rate:
* Program DEEP PKG_C_LATENCY Pkg C with the highest valid latency from
* watermark level 1 and above. If watermark level 1 is
* invalid, program it with all 1's.
* Program PKG_C_LATENCY Added Wake Time = DSB execution time
* If Variable Refresh Rate:
* Program DEEP PKG_C_LATENCY Pkg C with all 1's.
* Program PKG_C_LATENCY Added Wake Time = 0
*/
static void
skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled)
{
u32 max_latency = 0;
u32 clear = 0, val = 0;
u32 added_wake_time = 0;
if (DISPLAY_VER(i915) < 20)
return;
if (vrr_enabled) {
max_latency = LNL_PKG_C_LATENCY_MASK;
added_wake_time = 0;
} else {
max_latency = skl_watermark_max_latency(i915, 1);
if (max_latency == 0)
max_latency = LNL_PKG_C_LATENCY_MASK;
added_wake_time = DSB_EXE_TIME +
i915->display.sagv.block_time_us;
}
clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency);
val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val);
}
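/* Worked example (hypothetical numbers, not taken from this change):
* with a SAGV block time of 20 us and a highest valid latency of 36 us
* from watermark level 1 upward, the fixed-refresh path programs:
*
* added_wake_time = DSB_EXE_TIME + 20 = 120 (us)
* max_latency = 36 (us)
*
* With VRR enabled, the latency field is instead forced to all 1's and
* the added wake time to 0.
*/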
static int
skl_compute_wm(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
bool vrr_enabled = false;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
@ -2934,8 +2979,13 @@ skl_compute_wm(struct intel_atomic_state *state)
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
if (new_crtc_state->vrr.enable)
vrr_enabled = true;
}
skl_program_dpkgc_latency(to_i915(state->base.dev), vrr_enabled);
skl_print_wm_changes(state);
return 0;
@ -3731,11 +3781,11 @@ void skl_watermark_debugfs_register(struct drm_i915_private *i915)
&intel_sagv_status_fops);
}
unsigned int skl_watermark_max_latency(struct drm_i915_private *i915)
unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
{
int level;
for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
unsigned int latency = skl_wm_latency(i915, level, NULL);
if (latency)

View file

@ -46,8 +46,8 @@ void skl_watermark_ipc_update(struct drm_i915_private *i915);
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
void skl_watermark_debugfs_register(struct drm_i915_private *i915);
unsigned int skl_watermark_max_latency(struct drm_i915_private *i915);
unsigned int skl_watermark_max_latency(struct drm_i915_private *i915,
int initial_wm_level);
void skl_wm_init(struct drm_i915_private *i915);
struct intel_dbuf_state {

View file

@ -157,4 +157,8 @@
#define MTL_LATENCY_SAGV _MMIO(0x4578c)
#define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0)
#define LNL_PKG_C_LATENCY _MMIO(0x46460)
#define LNL_ADDED_WAKE_TIME_MASK REG_GENMASK(28, 16)
#define LNL_PKG_C_LATENCY_MASK REG_GENMASK(12, 0)
#endif /* __SKL_WATERMARK_REGS_H__ */

View file

@ -349,6 +349,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
GEM_WARN_ON(obj->userptr.page_ref);
if (!obj->userptr.notifier.mm)
return;
mmu_interval_notifier_remove(&obj->userptr.notifier);
obj->userptr.notifier.mm = NULL;
}

View file

@ -206,8 +206,6 @@ struct intel_guc {
u32 ads_golden_ctxt_size;
/** @ads_capture_size: size of register lists in the ADS used for error capture */
u32 ads_capture_size;
/** @ads_engine_usage_size: size of engine usage in the ADS */
u32 ads_engine_usage_size;
/** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
struct i915_vma *lrc_desc_pool_v69;

View file

@ -152,17 +152,6 @@ struct intel_vgpu_cursor_plane_format {
u32 y_hot; /* in pixels */
};
struct intel_vgpu_pipe_format {
struct intel_vgpu_primary_plane_format primary;
struct intel_vgpu_sprite_plane_format sprite;
struct intel_vgpu_cursor_plane_format cursor;
enum DDI_PORT ddi_port; /* the DDI port that pipe is connected to */
};
struct intel_vgpu_fb_format {
struct intel_vgpu_pipe_format pipes[I915_MAX_PIPES];
};
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane);
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,

View file

@ -93,8 +93,6 @@ struct intel_gvt_gtt_gma_ops {
struct intel_gvt_gtt {
const struct intel_gvt_gtt_pte_ops *pte_ops;
const struct intel_gvt_gtt_gma_ops *gma_ops;
int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
struct mutex ppgtt_mm_lock;
@ -210,7 +208,6 @@ struct intel_vgpu_scratch_pt {
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
struct list_head ppgtt_mm_list_head;
struct radix_tree_root spt_tree;
struct list_head oos_page_list_head;

View file

@ -89,7 +89,6 @@ struct intel_vgpu_gm {
/* Fences owned by a vGPU */
struct intel_vgpu_fence {
struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
u32 base;
u32 size;
};
@ -119,7 +118,6 @@ struct intel_vgpu_irq {
};
struct intel_vgpu_opregion {
bool mapped;
void *va;
u32 gfn[INTEL_GVT_OPREGION_PAGES];
};
@ -223,7 +221,6 @@ struct intel_vgpu {
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
/*
@ -256,7 +253,6 @@ struct intel_gvt_fence {
/* Special MMIO blocks. */
struct gvt_mmio_block {
unsigned int device;
i915_reg_t offset;
unsigned int size;
gvt_mmio_func read;
@ -444,7 +440,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define vgpu_hidden_gmadr_end(vgpu) \
(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
/* ring context size, i.e. the first 0x50 dwords */

View file

@ -40,7 +40,6 @@ struct intel_gvt_irq_info {
char *name;
i915_reg_t reg_base;
enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
unsigned long warned;
int group;
DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
bool has_upstream_irq;

View file

@ -177,7 +177,6 @@ enum intel_gvt_irq_type {
/* per-event information */
struct intel_gvt_event_info {
int bit; /* map to register bit */
int policy; /* forwarding policy */
struct intel_gvt_irq_info *info; /* register info */
gvt_event_virt_handler_t v_handler; /* for v_event */
};
@ -188,7 +187,6 @@ struct intel_gvt_irq {
struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
struct intel_gvt_irq_map *irq_map;
};

View file

@ -62,10 +62,8 @@ typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
struct intel_gvt_mmio_info {
u32 offset;
u64 ro_mask;
u32 device;
gvt_mmio_func read;
gvt_mmio_func write;
u32 addr_range;
struct hlist_node node;
};

View file

@ -104,10 +104,8 @@ struct intel_vgpu_workload {
/* execlist context information */
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
unsigned long guest_rb_head;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
atomic_t shadow_ctx_active;

View file

@ -24,8 +24,6 @@ struct drm_printer;
struct i915_drm_client {
struct kref kref;
unsigned int id;
spinlock_t ctx_lock; /* For add/remove from ctx_list. */
struct list_head ctx_list; /* List of contexts belonging to client. */

View file

@ -288,7 +288,6 @@ struct i915_perf_stream {
struct i915_vma *vma;
u8 *vaddr;
u32 last_ctx_id;
int size_exponent;
/**
* @oa_buffer.ptr_lock: Locks reads and writes to all

View file

@ -52,7 +52,6 @@
struct execute_cb {
struct irq_work work;
struct i915_sw_fence *fence;
struct i915_request *signal;
};
static struct kmem_cache *slab_requests;

View file

@ -290,7 +290,6 @@ struct i915_vma {
struct list_head obj_link; /* Link in the object's VMA list */
struct rb_node obj_node;
struct hlist_node obj_hash;
/** This vma's place in the eviction list */
struct list_head evict_link;

View file

@ -50,8 +50,6 @@ enum intel_region_id {
for_each_if((mr) = (i915)->mm.regions[id])
struct intel_memory_region_ops {
unsigned int flags;
int (*init)(struct intel_memory_region *mem);
int (*release)(struct intel_memory_region *mem);

View file

@ -73,6 +73,8 @@ void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int
struct cmdq_pkt *cmdq_pkt);
void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt);
void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt);
enum drm_mode_status mtk_merge_mode_valid(struct device *dev,
const struct drm_display_mode *mode);
void mtk_ovl_bgclr_in_on(struct device *dev);
void mtk_ovl_bgclr_in_off(struct device *dev);
@ -131,6 +133,8 @@ unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev);
struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev);
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev);
size_t mtk_ovl_adaptor_get_num_formats(struct device *dev);
enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev,
const struct drm_display_mode *mode);
void mtk_rdma_bypass_shadow(struct device *dev);
int mtk_rdma_clk_enable(struct device *dev);

View file

@ -222,6 +222,71 @@ void mtk_merge_clk_disable(struct device *dev)
clk_disable_unprepare(priv->clk);
}
enum drm_mode_status mtk_merge_mode_valid(struct device *dev,
const struct drm_display_mode *mode)
{
struct mtk_disp_merge *priv = dev_get_drvdata(dev);
unsigned long rate;
rate = clk_get_rate(priv->clk);
/* Convert to kHz, rounding to the nearest integer */
rate = (rate + 500) / 1000;
if (rate && mode->clock > rate) {
dev_dbg(dev, "invalid clock: %d (>%lu)\n", mode->clock, rate);
return MODE_CLOCK_HIGH;
}
/*
* Measure the bandwidth requirement of hardware prefetch (per frame)
*
* let N = prefetch buffer size in lines
* (e.g. N = 3 means a prefetch buffer size of 3 lines)
*
* prefetch size = htotal * N (pixels)
* time per line = 1 / fps / vtotal (seconds)
* duration = vbp * time per line
* = vbp / fps / vtotal
*
* data rate = prefetch size / duration
* = htotal * N / (vbp / fps / vtotal)
* = htotal * vtotal * fps * N / vbp
* = clk * N / vbp (pixels per second)
*
* Say 4K60 (CEA-861) is the maximum mode supported by the SoC
* data rate = 594000K * N / 72 = 8250 (standard)
* (the K and N factors cancel, as both sides share the same units)
*
* For 2560x1440@144 (clk=583600K, vbp=17):
* data rate = 583600 / 17 ~= 34329 > 8250 (NG)
*
* For 2560x1440@120 (clk=497760K, vbp=77):
* data rate = 497760 / 77 ~= 6464 < 8250 (OK)
*
* A non-standard 4K60 timing (clk=521280K, vbp=54)
* data rate = 521280 / 54 ~= 9653 > 8250 (NG)
*
* Bandwidth requirement of hardware prefetch increases significantly
* when the VBP decreases (more than 4x in this example).
*
* The proposed formula is only one way to estimate whether our SoC
* supports the mode setting. The basic idea behind it is just to check
* if the data rate requirement is too high (directly proportional to
* pixel clock, inversely proportional to vbp). Please adjust the
* function if it doesn't fit your situation in the future.
*/
rate = mode->clock / (mode->vtotal - mode->vsync_end);
if (rate > 8250) {
dev_dbg(dev, "invalid rate: %lu (>8250): " DRM_MODE_FMT "\n",
rate, DRM_MODE_ARG(mode));
return MODE_BAD;
}
return MODE_OK;
}
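/* Minimal sketch (hypothetical helper, not part of this driver)
* evaluating the estimate above. For CEA-861 4K60: clock = 594000 kHz,
* vtotal = 2250, vsync_end = 2178, so vbp = 72 lines and
* rate = 594000 / 72 = 8250, just at the limit.
*/
static bool mtk_merge_prefetch_rate_ok(const struct drm_display_mode *mode)
{
unsigned long rate = mode->clock / (mode->vtotal - mode->vsync_end);
return rate <= 8250;
}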
static int mtk_disp_merge_bind(struct device *dev, struct device *master,
void *data)
{

View file

@ -30,6 +30,7 @@ enum mtk_ovl_adaptor_comp_type {
OVL_ADAPTOR_TYPE_ETHDR,
OVL_ADAPTOR_TYPE_MDP_RDMA,
OVL_ADAPTOR_TYPE_MERGE,
OVL_ADAPTOR_TYPE_PADDING,
OVL_ADAPTOR_TYPE_NUM,
};
@ -47,6 +48,14 @@ enum mtk_ovl_adaptor_comp_id {
OVL_ADAPTOR_MERGE1,
OVL_ADAPTOR_MERGE2,
OVL_ADAPTOR_MERGE3,
OVL_ADAPTOR_PADDING0,
OVL_ADAPTOR_PADDING1,
OVL_ADAPTOR_PADDING2,
OVL_ADAPTOR_PADDING3,
OVL_ADAPTOR_PADDING4,
OVL_ADAPTOR_PADDING5,
OVL_ADAPTOR_PADDING6,
OVL_ADAPTOR_PADDING7,
OVL_ADAPTOR_ID_MAX
};
@ -67,6 +76,7 @@ static const char * const private_comp_stem[OVL_ADAPTOR_TYPE_NUM] = {
[OVL_ADAPTOR_TYPE_ETHDR] = "ethdr",
[OVL_ADAPTOR_TYPE_MDP_RDMA] = "vdo1-rdma",
[OVL_ADAPTOR_TYPE_MERGE] = "merge",
[OVL_ADAPTOR_TYPE_PADDING] = "padding",
};
static const struct mtk_ddp_comp_funcs ethdr = {
@ -79,6 +89,14 @@ static const struct mtk_ddp_comp_funcs ethdr = {
static const struct mtk_ddp_comp_funcs merge = {
.clk_enable = mtk_merge_clk_enable,
.clk_disable = mtk_merge_clk_disable,
.mode_valid = mtk_merge_mode_valid,
};
static const struct mtk_ddp_comp_funcs padding = {
.clk_enable = mtk_padding_clk_enable,
.clk_disable = mtk_padding_clk_disable,
.start = mtk_padding_start,
.stop = mtk_padding_stop,
};
static const struct mtk_ddp_comp_funcs rdma = {
@ -102,6 +120,14 @@ static const struct ovl_adaptor_comp_match comp_matches[OVL_ADAPTOR_ID_MAX] = {
[OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE2, 2, &merge },
[OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE3, 3, &merge },
[OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE4, 4, &merge },
[OVL_ADAPTOR_PADDING0] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING0, 0, &padding },
[OVL_ADAPTOR_PADDING1] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING1, 1, &padding },
[OVL_ADAPTOR_PADDING2] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING2, 2, &padding },
[OVL_ADAPTOR_PADDING3] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING3, 3, &padding },
[OVL_ADAPTOR_PADDING4] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING4, 4, &padding },
[OVL_ADAPTOR_PADDING5] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING5, 5, &padding },
[OVL_ADAPTOR_PADDING6] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING6, 6, &padding },
[OVL_ADAPTOR_PADDING7] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING7, 7, &padding },
};
void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx,
@ -317,6 +343,22 @@ void mtk_ovl_adaptor_clk_disable(struct device *dev)
}
}
enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev,
const struct drm_display_mode *mode)
{
int i;
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
dev = ovl_adaptor->ovl_adaptor_comp[i];
if (!dev || !comp_matches[i].funcs->mode_valid)
continue;
return comp_matches[i].funcs->mode_valid(dev, mode);
}
return MODE_OK;
}
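/* Note: only the first component that implements mode_valid is
* consulted; the remaining components are assumed to impose the same
* or looser constraints.
*/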
unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev)
{
return MTK_OVL_ADAPTOR_LAYER_NUM;
@ -437,6 +479,7 @@ static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node,
}
static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = {
{ .compatible = "mediatek,mt8188-disp-padding", .data = (void *)OVL_ADAPTOR_TYPE_PADDING },
{ .compatible = "mediatek,mt8195-disp-ethdr", .data = (void *)OVL_ADAPTOR_TYPE_ETHDR },
{ .compatible = "mediatek,mt8195-disp-merge", .data = (void *)OVL_ADAPTOR_TYPE_MERGE },
{ .compatible = "mediatek,mt8195-vdo1-rdma", .data = (void *)OVL_ADAPTOR_TYPE_MDP_RDMA },

View file

@ -95,11 +95,13 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
struct drm_crtc *crtc = &mtk_crtc->base;
unsigned long flags;
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
drm_crtc_vblank_put(crtc);
mtk_crtc->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
if (mtk_crtc->event) {
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
drm_crtc_vblank_put(crtc);
mtk_crtc->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
}
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
@ -213,6 +215,22 @@ static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
kfree(to_mtk_crtc_state(state));
}
static enum drm_mode_status
mtk_drm_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
enum drm_mode_status status = MODE_OK;
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
status = mtk_ddp_comp_mode_valid(mtk_crtc->ddp_comp[i], mode);
if (status != MODE_OK)
break;
}
return status;
}
static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@ -831,6 +849,7 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = {
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
.mode_fixup = mtk_drm_crtc_mode_fixup,
.mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
.mode_valid = mtk_drm_crtc_mode_valid,
.atomic_begin = mtk_drm_crtc_atomic_begin,
.atomic_flush = mtk_drm_crtc_atomic_flush,
.atomic_enable = mtk_drm_crtc_atomic_enable,

View file

@ -418,6 +418,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = {
.remove = mtk_ovl_adaptor_remove_comp,
.get_formats = mtk_ovl_adaptor_get_formats,
.get_num_formats = mtk_ovl_adaptor_get_num_formats,
.mode_valid = mtk_ovl_adaptor_mode_valid,
};
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {

View file

@ -12,6 +12,8 @@
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
#include <drm/drm_modes.h>
struct device;
struct device_node;
struct drm_crtc;
@ -85,6 +87,7 @@ struct mtk_ddp_comp_funcs {
void (*add)(struct device *dev, struct mtk_mutex *mutex);
void (*remove)(struct device *dev, struct mtk_mutex *mutex);
unsigned int (*encoder_index)(struct device *dev);
enum drm_mode_status (*mode_valid)(struct device *dev, const struct drm_display_mode *mode);
};
struct mtk_ddp_comp {
@ -126,6 +129,15 @@ static inline void mtk_ddp_comp_clk_disable(struct mtk_ddp_comp *comp)
comp->funcs->clk_disable(comp->dev);
}
static inline
enum drm_mode_status mtk_ddp_comp_mode_valid(struct mtk_ddp_comp *comp,
const struct drm_display_mode *mode)
{
if (comp && comp->funcs && comp->funcs->mode_valid)
return comp->funcs->mode_valid(comp->dev, mode);
return MODE_OK;
}
static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
unsigned int w, unsigned int h,
unsigned int vrefresh, unsigned int bpc,

View file

@ -293,7 +293,7 @@ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
.main_len = ARRAY_SIZE(mt8188_mtk_ddp_main),
.conn_routes = mt8188_mtk_ddp_main_routes,
.num_conn_routes = ARRAY_SIZE(mt8188_mtk_ddp_main_routes),
.mmsys_dev_num = 1,
.mmsys_dev_num = 2,
};
static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
@ -334,6 +334,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
.data = &mt8186_mmsys_driver_data},
{ .compatible = "mediatek,mt8188-vdosys0",
.data = &mt8188_vdosys0_driver_data},
{ .compatible = "mediatek,mt8188-vdosys1",
.data = &mt8195_vdosys1_driver_data},
{ .compatible = "mediatek,mt8192-mmsys",
.data = &mt8192_mmsys_driver_data},
{ .compatible = "mediatek,mt8195-mmsys",

View file

@ -3,6 +3,7 @@
* Copyright (c) 2015 MediaTek Inc.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
@ -12,6 +13,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/units.h>
#include <video/mipi_display.h>
#include <video/videomode.h>
@ -58,28 +60,31 @@
#define DSI_TXRX_CTRL 0x18
#define VC_NUM BIT(1)
#define LANE_NUM (0xf << 2)
#define LANE_NUM GENMASK(5, 2)
#define DIS_EOT BIT(6)
#define NULL_EN BIT(7)
#define TE_FREERUN BIT(8)
#define EXT_TE_EN BIT(9)
#define EXT_TE_EDGE BIT(10)
#define MAX_RTN_SIZE (0xf << 12)
#define MAX_RTN_SIZE GENMASK(15, 12)
#define HSTX_CKLP_EN BIT(16)
#define DSI_PSCTRL 0x1c
#define DSI_PS_WC 0x3fff
#define DSI_PS_SEL (3 << 16)
#define PACKED_PS_16BIT_RGB565 (0 << 16)
#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
#define PACKED_PS_18BIT_RGB666 (2 << 16)
#define PACKED_PS_24BIT_RGB888 (3 << 16)
#define DSI_PS_WC GENMASK(13, 0)
#define DSI_PS_SEL GENMASK(17, 16)
#define PACKED_PS_16BIT_RGB565 0
#define PACKED_PS_18BIT_RGB666 1
#define LOOSELY_PS_24BIT_RGB666 2
#define PACKED_PS_24BIT_RGB888 3
#define DSI_VSA_NL 0x20
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
#define VACT_NL GENMASK(14, 0)
#define DSI_SIZE_CON 0x38
#define DSI_HEIGHT GENMASK(30, 16)
#define DSI_WIDTH GENMASK(14, 0)
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
@ -109,26 +114,27 @@
#define LD0_WAKEUP_EN BIT(2)
#define DSI_PHY_TIMECON0 0x110
#define LPX (0xff << 0)
#define HS_PREP (0xff << 8)
#define HS_ZERO (0xff << 16)
#define HS_TRAIL (0xff << 24)
#define LPX GENMASK(7, 0)
#define HS_PREP GENMASK(15, 8)
#define HS_ZERO GENMASK(23, 16)
#define HS_TRAIL GENMASK(31, 24)
#define DSI_PHY_TIMECON1 0x114
#define TA_GO (0xff << 0)
#define TA_SURE (0xff << 8)
#define TA_GET (0xff << 16)
#define DA_HS_EXIT (0xff << 24)
#define TA_GO GENMASK(7, 0)
#define TA_SURE GENMASK(15, 8)
#define TA_GET GENMASK(23, 16)
#define DA_HS_EXIT GENMASK(31, 24)
#define DSI_PHY_TIMECON2 0x118
#define CONT_DET (0xff << 0)
#define CLK_ZERO (0xff << 16)
#define CLK_TRAIL (0xff << 24)
#define CONT_DET GENMASK(7, 0)
#define DA_HS_SYNC GENMASK(15, 8)
#define CLK_ZERO GENMASK(23, 16)
#define CLK_TRAIL GENMASK(31, 24)
#define DSI_PHY_TIMECON3 0x11c
#define CLK_HS_PREP (0xff << 0)
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
#define CLK_HS_PREP GENMASK(7, 0)
#define CLK_HS_POST GENMASK(15, 8)
#define CLK_HS_EXIT GENMASK(23, 16)
#define DSI_VM_CMD_CON 0x130
#define VM_CMD_EN BIT(0)
@ -138,13 +144,14 @@
#define FORCE_COMMIT BIT(0)
#define BYPASS_SHADOW BIT(1)
#define CONFIG (0xff << 0)
/* CMDQ related bits */
#define CONFIG GENMASK(7, 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
#define BTA BIT(2)
#define DATA_ID (0xff << 8)
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)
#define DATA_ID GENMASK(15, 8)
#define DATA_0 GENMASK(23, 16)
#define DATA_1 GENMASK(31, 24)
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
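/* The macro above is round-up division; for positive c it is
* equivalent to the kernel's DIV_ROUND_UP(n, c).
*/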
@ -232,7 +239,7 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
struct mtk_phy_timing *timing = &dsi->phy_timing;
timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
@ -252,14 +259,23 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
timing->clk_hs_zero = timing->clk_hs_trail * 4;
timing->clk_hs_exit = 2 * timing->clk_hs_trail;
timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
timcon1 = timing->ta_go | timing->ta_sure << 8 |
timing->ta_get << 16 | timing->da_hs_exit << 24;
timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
timing->clk_hs_trail << 24;
timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
timing->clk_hs_exit << 16;
timcon0 = FIELD_PREP(LPX, timing->lpx) |
FIELD_PREP(HS_PREP, timing->da_hs_prepare) |
FIELD_PREP(HS_ZERO, timing->da_hs_zero) |
FIELD_PREP(HS_TRAIL, timing->da_hs_trail);
timcon1 = FIELD_PREP(TA_GO, timing->ta_go) |
FIELD_PREP(TA_SURE, timing->ta_sure) |
FIELD_PREP(TA_GET, timing->ta_get) |
FIELD_PREP(DA_HS_EXIT, timing->da_hs_exit);
timcon2 = FIELD_PREP(DA_HS_SYNC, 1) |
FIELD_PREP(CLK_ZERO, timing->clk_hs_zero) |
FIELD_PREP(CLK_TRAIL, timing->clk_hs_trail);
timcon3 = FIELD_PREP(CLK_HS_PREP, timing->clk_hs_prepare) |
FIELD_PREP(CLK_HS_POST, timing->clk_hs_post) |
FIELD_PREP(CLK_HS_EXIT, timing->clk_hs_exit);
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@ -350,101 +366,63 @@ static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}
static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
struct videomode *vm = &dsi->vm;
u32 dsi_buf_bpp, ps_wc;
u32 ps_bpp_mode;
u32 regval, tmp_reg = 0;
u8 i;
/* Number of DSI lanes (max 4 lanes), each bit enables one DSI lane. */
for (i = 0; i < dsi->lanes; i++)
tmp_reg |= BIT(i);
regval = FIELD_PREP(LANE_NUM, tmp_reg);
if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
regval |= HSTX_CKLP_EN;
if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
regval |= DIS_EOT;
writel(regval, dsi->regs + DSI_TXRX_CTRL);
}
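/* Sanity check (illustrative): LANE_NUM is GENMASK(5, 2), so with all
* four lanes enabled FIELD_PREP(LANE_NUM, 0xf) == 0xf << 2 == 0x3c,
* matching the previously open-coded shift.
*/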
static void mtk_dsi_ps_control(struct mtk_dsi *dsi, bool config_vact)
{
u32 dsi_buf_bpp, ps_val, ps_wc, vact_nl;
if (dsi->format == MIPI_DSI_FMT_RGB565)
dsi_buf_bpp = 2;
else
dsi_buf_bpp = 3;
ps_wc = vm->hactive * dsi_buf_bpp;
ps_bpp_mode = ps_wc;
/* Word count */
ps_wc = FIELD_PREP(DSI_PS_WC, dsi->vm.hactive * dsi_buf_bpp);
ps_val = ps_wc;
/* Pixel Stream type */
switch (dsi->format) {
default:
fallthrough;
case MIPI_DSI_FMT_RGB888:
ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
ps_val |= FIELD_PREP(DSI_PS_SEL, PACKED_PS_24BIT_RGB888);
break;
case MIPI_DSI_FMT_RGB666:
ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
ps_val |= FIELD_PREP(DSI_PS_SEL, LOOSELY_PS_24BIT_RGB666);
break;
case MIPI_DSI_FMT_RGB666_PACKED:
ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
ps_val |= FIELD_PREP(DSI_PS_SEL, PACKED_PS_18BIT_RGB666);
break;
case MIPI_DSI_FMT_RGB565:
ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
ps_val |= FIELD_PREP(DSI_PS_SEL, PACKED_PS_16BIT_RGB565);
break;
}
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
u32 tmp_reg;
switch (dsi->lanes) {
case 1:
tmp_reg = 1 << 2;
break;
case 2:
tmp_reg = 3 << 2;
break;
case 3:
tmp_reg = 7 << 2;
break;
case 4:
tmp_reg = 0xf << 2;
break;
default:
tmp_reg = 0xf << 2;
break;
if (config_vact) {
vact_nl = FIELD_PREP(VACT_NL, dsi->vm.vactive);
writel(vact_nl, dsi->regs + DSI_VACT_NL);
writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}
if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
tmp_reg |= HSTX_CKLP_EN;
if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
tmp_reg |= DIS_EOT;
writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}
static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
u32 dsi_tmp_buf_bpp;
u32 tmp_reg;
switch (dsi->format) {
case MIPI_DSI_FMT_RGB888:
tmp_reg = PACKED_PS_24BIT_RGB888;
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB666:
tmp_reg = LOOSELY_PS_18BIT_RGB666;
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
tmp_reg = PACKED_PS_18BIT_RGB666;
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB565:
tmp_reg = PACKED_PS_16BIT_RGB565;
dsi_tmp_buf_bpp = 2;
break;
default:
tmp_reg = PACKED_PS_24BIT_RGB888;
dsi_tmp_buf_bpp = 3;
break;
}
tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
writel(tmp_reg, dsi->regs + DSI_PSCTRL);
writel(ps_val, dsi->regs + DSI_PSCTRL);
}
static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
@ -471,7 +449,8 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
if (dsi->driver_data->has_size_ctl)
writel(vm->vactive << 16 | vm->hactive,
writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) |
FIELD_PREP(DSI_WIDTH, vm->hactive),
dsi->regs + DSI_SIZE_CON);
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
@ -520,7 +499,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
mtk_dsi_ps_control(dsi);
mtk_dsi_ps_control(dsi, false);
}
static void mtk_dsi_start(struct mtk_dsi *dsi)
@ -619,19 +598,12 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
if (++dsi->refcount != 1)
return 0;
switch (dsi->format) {
case MIPI_DSI_FMT_RGB565:
bit_per_pixel = 16;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
bit_per_pixel = 18;
break;
case MIPI_DSI_FMT_RGB666:
case MIPI_DSI_FMT_RGB888:
default:
bit_per_pixel = 24;
break;
ret = mipi_dsi_pixel_format_to_bpp(dsi->format);
if (ret < 0) {
dev_err(dev, "Unknown MIPI DSI format %d\n", dsi->format);
return ret;
}
bit_per_pixel = ret;
dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
dsi->lanes);
@ -665,7 +637,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
mtk_dsi_ps_control_vact(dsi);
mtk_dsi_ps_control(dsi, true);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
mtk_dsi_set_interrupt_enable(dsi);
@ -814,12 +786,11 @@ mtk_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
u32 bpp;
int bpp;
if (dsi->format == MIPI_DSI_FMT_RGB565)
bpp = 16;
else
bpp = 24;
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
if (bpp < 0)
return MODE_ERROR;
if (mode->clock * bpp / dsi->lanes > 1500000)
return MODE_CLOCK_HIGH;
@ -1135,67 +1106,47 @@ static int mtk_dsi_probe(struct platform_device *pdev)
if (!dsi)
return -ENOMEM;
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
ret = mipi_dsi_host_register(&dsi->host);
if (ret < 0) {
dev_err(dev, "failed to register DSI host: %d\n", ret);
return ret;
}
dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
if (IS_ERR(dsi->engine_clk))
return dev_err_probe(dev, PTR_ERR(dsi->engine_clk),
"Failed to get engine clock\n");
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get engine clock: %d\n", ret);
goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get digital clock: %d\n", ret);
goto err_unregister_host;
}
if (IS_ERR(dsi->digital_clk))
return dev_err_probe(dev, PTR_ERR(dsi->digital_clk),
"Failed to get digital clock\n");
dsi->hs_clk = devm_clk_get(dev, "hs");
if (IS_ERR(dsi->hs_clk)) {
ret = PTR_ERR(dsi->hs_clk);
dev_err(dev, "Failed to get hs clock: %d\n", ret);
goto err_unregister_host;
}
if (IS_ERR(dsi->hs_clk))
return dev_err_probe(dev, PTR_ERR(dsi->hs_clk), "Failed to get hs clock\n");
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(dev, regs);
if (IS_ERR(dsi->regs)) {
ret = PTR_ERR(dsi->regs);
dev_err(dev, "Failed to ioremap memory: %d\n", ret);
goto err_unregister_host;
}
if (IS_ERR(dsi->regs))
return dev_err_probe(dev, PTR_ERR(dsi->regs), "Failed to ioremap memory\n");
dsi->phy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->phy)) {
ret = PTR_ERR(dsi->phy);
dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
goto err_unregister_host;
}
if (IS_ERR(dsi->phy))
return dev_err_probe(dev, PTR_ERR(dsi->phy), "Failed to get MIPI-DPHY\n");
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
ret = irq_num;
goto err_unregister_host;
}
if (irq_num < 0)
return irq_num;
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
ret = mipi_dsi_host_register(&dsi->host);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to register DSI host\n");
ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
goto err_unregister_host;
mipi_dsi_host_unregister(&dsi->host);
return dev_err_probe(&pdev->dev, ret, "Failed to request DSI irq\n");
}
init_waitqueue_head(&dsi->irq_wait_queue);
@ -1207,10 +1158,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
return 0;
err_unregister_host:
mipi_dsi_host_unregister(&dsi->host);
return ret;
}
static void mtk_dsi_remove(struct platform_device *pdev)
@ -1249,17 +1196,12 @@ static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
};
static const struct of_device_id mtk_dsi_of_match[] = {
{ .compatible = "mediatek,mt2701-dsi",
.data = &mt2701_dsi_driver_data },
{ .compatible = "mediatek,mt8173-dsi",
.data = &mt8173_dsi_driver_data },
{ .compatible = "mediatek,mt8183-dsi",
.data = &mt8183_dsi_driver_data },
{ .compatible = "mediatek,mt8186-dsi",
.data = &mt8186_dsi_driver_data },
{ .compatible = "mediatek,mt8188-dsi",
.data = &mt8188_dsi_driver_data },
{ },
{ .compatible = "mediatek,mt2701-dsi", .data = &mt2701_dsi_driver_data },
{ .compatible = "mediatek,mt8173-dsi", .data = &mt8173_dsi_driver_data },
{ .compatible = "mediatek,mt8183-dsi", .data = &mt8183_dsi_driver_data },
{ .compatible = "mediatek,mt8186-dsi", .data = &mt8186_dsi_driver_data },
{ .compatible = "mediatek,mt8188-dsi", .data = &mt8188_dsi_driver_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_dsi_of_match);

View file

@ -11,3 +11,15 @@ config DRM_MGAG200
MGA G200 desktop chips and the server variants. It requires 0.3.0
of the modesetting userspace driver, and a version of mga driver
that will fail on KMS enabled devices.
config DRM_MGAG200_IOBURST_WORKAROUND
bool "Disable buffer caching"
depends on DRM_MGAG200 && PREEMPT_RT && X86
help
Enable a workaround to avoid I/O bursts within the mgag200 driver at
the expense of overall display performance.
It restores the pre-v5.10 behavior by mapping the framebuffer in
system RAM as Write-Combining and flushing the cache after each write.
This is only useful on x86_64 if you want to run processes with
deterministic latency.
If unsure, say N.
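# Usage sketch (a .config fragment); per the dependencies above, the
# option is only selectable on x86 PREEMPT_RT builds:
CONFIG_DRM_MGAG200=m
CONFIG_DRM_MGAG200_IOBURST_WORKAROUND=y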

View file

@ -84,6 +84,20 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
return offset - 65536;
}
#if defined(CONFIG_DRM_MGAG200_IOBURST_WORKAROUND)
static struct drm_gem_object *mgag200_create_object(struct drm_device *dev, size_t size)
{
struct drm_gem_shmem_object *shmem;
shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
if (!shmem)
return NULL;
shmem->map_wc = true;
return &shmem->base;
}
#endif
/*
* DRM driver
*/
@ -99,6 +113,9 @@ static const struct drm_driver mgag200_driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
#if defined(CONFIG_DRM_MGAG200_IOBURST_WORKAROUND)
.gem_create_object = mgag200_create_object,
#endif
DRM_GEM_SHMEM_DRIVER_OPS,
};

View file

@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_cache.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
@ -436,6 +437,13 @@ static void mgag200_handle_damage(struct mga_device *mdev, const struct iosys_ma
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, vmap, fb, clip);
/* Flushing the cache greatly improves latency on x86_64 */
#if defined(CONFIG_DRM_MGAG200_IOBURST_WORKAROUND)
if (!vmap->is_iomem)
drm_clflush_virt_range(vmap->vaddr + clip->y1 * fb->pitches[0],
drm_rect_height(clip) * fb->pitches[0]);
#endif
}
/*

View file

@ -127,8 +127,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_drm.o \
dp/dp_link.o \
dp/dp_panel.o \
dp/dp_parser.o \
dp/dp_power.o \
dp/dp_audio.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o

View file

@ -3,28 +3,20 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
http://gitlab.freedesktop.org/mesa/mesa/
git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
Copyright (C) 2013-2023 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from Fri Jun 2 14:59:26 2023)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
Copyright (C) 2013-2024 by the following authors:
- Rob Clark <robdclark@gmail.com> Rob Clark
- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@ -45,8 +37,21 @@ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef __KERNEL__
#include <linux/bug.h>
#define assert(x) BUG_ON(!(x))
#else
#include <assert.h>
#endif
#ifdef __cplusplus
#define __struct_cast(X)
#else
#define __struct_cast(X) (struct X)
#endif
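The assert() shim above lets the generated packing helpers validate field alignment in both kernel builds (BUG_ON) and userspace builds (<assert.h>). A worked example against one of the helpers that follows, with values chosen for illustration:

uint32_t w = A2XX_A220_VSC_BIN_SIZE_WIDTH(64);	/* 64 & 0x1f == 0: assert passes, packs 64 >> 5 == 2 */
/* A2XX_A220_VSC_BIN_SIZE_WIDTH(70) would trip the assert (BUG_ON in-kernel). */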
enum a2xx_rb_dither_type {
DITHER_PIXEL = 0,
@ -1442,16 +1447,18 @@ static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
{
return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
}
#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
{
return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
}
static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
#define REG_A2XX_VSC_PIPE(i0) (0x00000c06 + 0x3*(i0))
static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
@ -1661,7 +1668,8 @@ static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
{
return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
assert(!(val & 0xfff));
return (((val >> 12)) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
}
#define REG_A2XX_RB_DEPTH_INFO 0x00002002
@ -1675,7 +1683,8 @@ static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
assert(!(val & 0xfff));
return (((val >> 12)) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
@ -2654,7 +2663,8 @@ static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
{
return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
}
#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
@ -3027,7 +3037,8 @@ static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
{
return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
}
#define A2XX_SQ_TEX_0_TILED 0x80000000
@ -3061,7 +3072,8 @@ static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
{
return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
assert(!(val & 0xfff));
return (((val >> 12)) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
}
#define REG_A2XX_SQ_TEX_2 0x00000002
@ -3229,8 +3241,11 @@ static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
{
return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
assert(!(val & 0xfff));
return (((val >> 12)) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
}
#ifdef __cplusplus
#endif
#endif /* A2XX_XML */

View file

@ -3,28 +3,20 @@
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
http://gitlab.freedesktop.org/mesa/mesa/
git clone https://gitlab.freedesktop.org/mesa/mesa.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
Copyright (C) 2013-2022 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84323 bytes, from Wed Aug 23 10:39:39 2023)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from Fri Jun 2 14:59:26 2023)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from Fri Jun 2 14:59:26 2023)
- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 85691 bytes, from Fri Feb 16 09:49:01 2024)
Copyright (C) 2013-2024 by the following authors:
- Rob Clark <robdclark@gmail.com> Rob Clark
- Ilia Mirkin <imirkin@alum.mit.edu> Ilia Mirkin
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@ -45,8 +37,21 @@ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifdef __KERNEL__
#include <linux/bug.h>
#define assert(x) BUG_ON(!(x))
#else
#include <assert.h>
#endif
#ifdef __cplusplus
#define __struct_cast(X)
#else
#define __struct_cast(X) (struct X)
#endif
enum a3xx_tile_mode {
LINEAR = 0,
@ -612,6 +617,7 @@ enum a3xx_tex_msaa {
#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
#define REG_A3XX_RBBM_HW_VERSION 0x00000000
#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
@ -672,13 +678,9 @@ enum a3xx_tex_msaa {
#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060
#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001
@ -912,7 +914,7 @@ enum a3xx_tex_msaa {
#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
#define REG_A3XX_CP_PROTECT(i0) (0x00000460 + 0x1*(i0))
static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
@ -1167,7 +1169,8 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val)
#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
{
return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
}
#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
@ -1218,7 +1221,7 @@ static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
return ((_mesa_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
}
static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
#define REG_A3XX_RB_MRT(i0) (0x000020c4 + 0x4*(i0))
static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
@ -1267,7 +1270,8 @@ static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
{
return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
}
static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
@ -1275,7 +1279,8 @@ static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6
#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
{
return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
}
static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
@ -1407,7 +1412,8 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
{
return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
assert(!(val & 0x3fff));
return (((val >> 14)) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
}
#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
@ -1415,7 +1421,8 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
{
return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
}
#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
@ -1423,7 +1430,8 @@ static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
{
return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
}
#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
@ -1491,7 +1499,8 @@ static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
assert(!(val & 0xfff));
return (((val >> 12)) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
@ -1499,7 +1508,8 @@ static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
#define A3XX_RB_DEPTH_PITCH__SHIFT 0
static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
{
return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
assert(!(val & 0x7));
return (((val >> 3)) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
}
#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
@ -1562,7 +1572,8 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 11
static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
{
return ((val >> 12) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
assert(!(val & 0xfff));
return (((val >> 12)) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
}
#define REG_A3XX_RB_STENCIL_PITCH 0x00002107
@ -1570,7 +1581,8 @@ static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
#define A3XX_RB_STENCIL_PITCH__SHIFT 0
static inline uint32_t A3XX_RB_STENCIL_PITCH(uint32_t val)
{
return ((val >> 3) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK;
assert(!(val & 0x7));
return (((val >> 3)) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK;
}
#define REG_A3XX_RB_STENCILREFMASK 0x00002108
@ -1877,7 +1889,7 @@ static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
}
static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; }
#define REG_A3XX_HLSQ_CL_GLOBAL_WORK(i0) (0x0000220b + 0x2*(i0))
static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
@ -1889,7 +1901,7 @@ static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return
#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; }
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP(i0) (0x00002215 + 0x1*(i0))
static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
@ -1965,7 +1977,7 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
#define REG_A3XX_VFD_FETCH(i0) (0x00002246 + 0x2*(i0))
static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
@ -1997,7 +2009,7 @@ static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
#define REG_A3XX_VFD_DECODE(i0) (0x00002266 + 0x1*(i0))
static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
@ -2084,7 +2096,7 @@ static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
}
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
#define REG_A3XX_VPC_VARYING_INTERP(i0) (0x00002282 + 0x1*(i0))
static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
@ -2184,7 +2196,7 @@ static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
}
static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
#define REG_A3XX_VPC_VARYING_PS_REPL(i0) (0x00002286 + 0x1*(i0))
static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK 0x00000003
@ -2392,7 +2404,7 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
}
static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
#define REG_A3XX_SP_VS_OUT(i0) (0x000022c7 + 0x1*(i0))
static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
@ -2422,7 +2434,7 @@ static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
}
static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
#define REG_A3XX_SP_VS_VPC_DST(i0) (0x000022d0 + 0x1*(i0))
static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
@ -2477,7 +2489,8 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
{
return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
assert(!(val & 0x7f));
return (((val >> 7)) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
}
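Note that this hunk changes the encoding, not just the validation: MEMSIZEPERITEM is now packed in 128-byte units, so the helper emits val >> 7 (val must be a multiple of 128; 384 packs as 3) where it previously packed val unshifted.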
#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
@ -2503,7 +2516,8 @@ static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
{
return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
}
#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
@ -2641,7 +2655,8 @@ static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
{
return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
}
#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
@ -2665,7 +2680,7 @@ static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
}
static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
#define REG_A3XX_SP_FS_MRT(i0) (0x000022f0 + 0x1*(i0))
static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
@ -2678,7 +2693,7 @@ static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
#define REG_A3XX_SP_FS_IMAGE_OUTPUT(i0) (0x000022f4 + 0x1*(i0))
static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
@ -2821,18 +2836,20 @@ static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
{
return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
}
#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
{
return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
assert(!(val & 0x1f));
return (((val >> 5)) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
}
#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
#define REG_A3XX_VSC_PIPE(i0) (0x00000c06 + 0x3*(i0))
static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
@ -2887,7 +2904,7 @@ static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x000
#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
#define REG_A3XX_GRAS_CL_USER_PLANE(i0) (0x00000ca0 + 0x4*(i0))
static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
@ -3228,7 +3245,8 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
{
return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
assert(!(val & 0xfff));
return (((val >> 12)) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
}
#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000
#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17
@ -3240,8 +3258,11 @@ static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28
static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
{
return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
assert(!(val & 0xfff));
return (((val >> 12)) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
}
#ifdef __cplusplus
#endif
#endif /* A3XX_XML */

View file

@ -134,6 +134,13 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* Set up AOOO: */
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
} else if (adreno_is_a305b(adreno_gpu)) {
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x00181818);
gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x00181818);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000018);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000018);
gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000303);
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
} else if (adreno_is_a306(adreno_gpu)) {
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
@ -230,7 +237,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */
if (adreno_is_a306(adreno_gpu))
if (adreno_is_a305b(adreno_gpu) || adreno_is_a306(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
else if (adreno_is_a320(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
@ -333,7 +340,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
} else if (adreno_is_a330(adreno_gpu)) {
} else if (adreno_is_a330(adreno_gpu) || adreno_is_a305b(adreno_gpu)) {
/* NOTE: this (value taken from downstream android driver)
* includes some bits outside of the known bitfields. But
* A330 has this "MERCIU queue" thing too, which might
@ -559,7 +566,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
/* if needed, allocate gmem: */
if (adreno_is_a330(adreno_gpu)) {
if (adreno_is_a330(adreno_gpu) || adreno_is_a305b(adreno_gpu)) {
ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
adreno_gpu, &a3xx_gpu->ocmem);
if (ret)
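Since A305B now takes the OCMEM-backed GMEM path, teardown presumably needs the matching release, as with A330. A one-line sketch, assuming the usual init/cleanup pairing in this driver (names as used in the hunk above):

	/* in the GPU destroy path */
	adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);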

Some files were not shown because too many files have changed in this diff.