/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Terrence Xu <terrence.xu@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <drm/display/drm_dp.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "display/intel_display_regs.h"
#include "gvt.h"

#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display.h"
#include "display/intel_display_core.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_sprite_regs.h"

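/*
 * Resolve which pipe the vGPU's virtual eDP transcoder is routed to, based
 * on the TRANS_DDI_EDP_INPUT field of the virtual TRANS_DDI_FUNC_CTL_EDP
 * register. Returns -1 if no pipe is selected.
 */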
static int get_edp_pipe(struct intel_vgpu *vgpu)
{
	u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
	int pipe = -1;

	switch (data & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
	case TRANS_DDI_EDP_INPUT_A_ONOFF:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	}
	return pipe;
}

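/*
 * The virtual eDP path counts as enabled only when both the eDP transcoder
 * and its DDI function are enabled in the vGPU's virtual registers.
 */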
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;

	if (!(vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
		return 0;

	if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
		return 0;
	return 1;
}

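/*
 * Check whether a virtual pipe is enabled, either directly through its own
 * transcoder or indirectly through the eDP transcoder routed to it.
 * Returns 1 if enabled, 0 if not, -EINVAL for an invalid pipe.
 */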
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;

	if (drm_WARN_ON(&dev_priv->drm,
			pipe < PIPE_A || pipe >= I915_MAX_PIPES))
		return -EINVAL;

	if (vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) & TRANSCONF_ENABLE)
		return 1;

	if (edp_pipe_is_enabled(vgpu) &&
			get_edp_pipe(vgpu) == pipe)
		return 1;
	return 0;
}

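/*
 * Canned 128-byte EDID blocks exposed to the guest for the virtual DP
 * monitor, indexed by the GVT_EDID_* resolution id passed to
 * setup_virtual_dp_monitor().
 */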
static const unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
	{
	/* EDID with 1024x768 as its resolution */
		/*Header*/
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
		/* Vendor & Product Identification */
		0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
		/* Version & Revision */
		0x01, 0x04,
		/* Basic Display Parameters & Features */
		0xa5, 0x34, 0x20, 0x78, 0x23,
		/* Color Characteristics */
		0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
		/* Established Timings: maximum resolution is 1024x768 */
		0x21, 0x08, 0x00,
		/* Standard Timings. All invalid */
		0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
		0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
		/* 18 Byte Data Blocks 1: invalid */
		0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
		0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
		/* 18 Byte Data Blocks 2: invalid */
		0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		/* 18 Byte Data Blocks 3: invalid */
		0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
		0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
		/* 18 Byte Data Blocks 4: invalid */
		0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
		0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
		/* Extension Block Count */
		0x00,
		/* Checksum */
		0xef,
	},
	{
	/* EDID with 1920x1200 as its resolution */
		/*Header*/
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
		/* Vendor & Product Identification */
		0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
		/* Version & Revision */
		0x01, 0x04,
		/* Basic Display Parameters & Features */
		0xa5, 0x34, 0x20, 0x78, 0x23,
		/* Color Characteristics */
		0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
		/* Established Timings: maximum resolution is 1024x768 */
		0x21, 0x08, 0x00,
		/*
		 * Standard Timings.
		 * The following additional resolutions are supported:
		 * 1920x1080, 1280x720, 1280x960, 1280x1024,
		 * 1440x900, 1600x1200, 1680x1050
		 */
		0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
		0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
		/* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
		0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
		0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
		/* 18 Byte Data Blocks 2: invalid */
		0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		/* 18 Byte Data Blocks 3: invalid */
		0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
		0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
		/* 18 Byte Data Blocks 4: invalid */
		0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
		0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
		/* Extension Block Count */
		0x00,
		/* Checksum */
		0x45,
	},
};

#define DPCD_HEADER_SIZE 0xb

/* Let the virtual display support DP 1.2: DPCD rev 1.2, 5.4 Gbps max link rate, 4 lanes */
static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
	0x12, 0x014, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

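/*
 * Program the vGPU's virtual display registers so the guest sees the
 * monitors configured by setup_virtual_dp_monitor() as connected:
 * hotplug/strap status, DDI buffer and transcoder state, PLL state and
 * golden M/N values, with a Broxton-specific path and a common path for
 * BDW and SKL-class parts.
 */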
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;
	int pipe;

	if (IS_BROXTON(dev_priv)) {
		enum transcoder trans;
		enum port port;

		/* Clear PIPE, DDI, PHY, HPD before setting new */
		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
			~(GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) |
			  GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) |
			  GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));

		for_each_pipe(display, pipe) {
			vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) &=
				~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE);
			vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE;
			vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
			vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK;
			vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE;
		}

		for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) {
			vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, trans)) &=
				~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
				  TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE);
		}
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			  TRANS_DDI_PORT_MASK);

		for (port = PORT_A; port <= PORT_C; port++) {
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) &=
				~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) |=
				(BXT_PHY_CMNLANE_POWERDOWN_ACK |
				 BXT_PHY_LANE_POWERDOWN_ACK);

			vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)) &=
				~(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
				  PORT_PLL_REF_SEL | PORT_PLL_LOCK |
				  PORT_PLL_ENABLE);

			vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) &=
				~(DDI_INIT_DISPLAY_DETECTED |
				  DDI_BUF_CTL_ENABLE);
			vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
		}
		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
			~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
			~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
			~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
		/* No hpd_invert set in vgpu vbt, need to clear invert mask */
		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;

		vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
			~PHY_POWER_GOOD;
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
			~PHY_POWER_GOOD;
		vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30);
		vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30);

		vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED;
		vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED;

		/*
		 * Only 1 PIPE enabled in current vGPU display and PIPE_A is
		 * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A,
		 * TRANSCODER_A can be enabled. PORT_x depends on the input of
		 * setup_virtual_dp_monitor.
		 */
		vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE;
		vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;

		/*
		 * Golden M/N are calculated based on:
		 * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
		 * DP link clk 1620 MHz and non-constant_n.
		 * TODO: calculate DP link symbol clk and stream clk m/n.
		 */
		vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64);
		vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e;
		vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000;
		vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e;
		vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000;

		/* Enable per-DDI/PORT vreg */
		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(1);
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
				PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) |=
				BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
				BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
				~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
				  BXT_PHY_LANE_POWERDOWN_ACK);
			vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_A)) |=
				(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
				 PORT_PLL_REF_SEL | PORT_PLL_LOCK |
				 PORT_PLL_ENABLE);
			vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |=
				(DDI_BUF_CTL_ENABLE | DDI_INIT_DISPLAY_DETECTED);
			vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &=
				~DDI_BUF_IS_IDLE;
			vgpu_vreg_t(vgpu,
				    TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP)) |=
				(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
				 TRANS_DDI_FUNC_ENABLE);
			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
				PORTA_HOTPLUG_ENABLE;
			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
				GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
		}

		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
			vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0);
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
				PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |=
				BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
				BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
				~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
				  BXT_PHY_LANE_POWERDOWN_ACK);
			vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_B)) |=
				(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
				 PORT_PLL_REF_SEL | PORT_PLL_LOCK |
				 PORT_PLL_ENABLE);
			vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |=
				DDI_BUF_CTL_ENABLE;
			vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &=
				~DDI_BUF_IS_IDLE;
			vgpu_vreg_t(vgpu,
				    TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
				(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
				 (PORT_B << TRANS_DDI_PORT_SHIFT) |
				 TRANS_DDI_FUNC_ENABLE);
			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
				PORTB_HOTPLUG_ENABLE;
			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
				GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
		}

		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
			vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0);
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
				PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |=
				BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
				BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
				~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
				  BXT_PHY_LANE_POWERDOWN_ACK);
			vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_C)) |=
				(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
				 PORT_PLL_REF_SEL | PORT_PLL_LOCK |
				 PORT_PLL_ENABLE);
			vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |=
				DDI_BUF_CTL_ENABLE;
			vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &=
				~DDI_BUF_IS_IDLE;
			vgpu_vreg_t(vgpu,
				    TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
				(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
				 (PORT_B << TRANS_DDI_PORT_SHIFT) |
				 TRANS_DDI_FUNC_ENABLE);
			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
				PORTC_HOTPLUG_ENABLE;
			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
				GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
		}

		return;
	}

	vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);

	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv) ||
	    IS_COMETLAKE(dev_priv)) {
		vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
				SDE_PORTE_HOTPLUG_SPT);
		vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
				SKL_FUSE_DOWNLOAD_STATUS |
				SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
				SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
				SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
		/*
		 * Only 1 PIPE enabled in current vGPU display and PIPE_A is
		 * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A,
		 * TRANSCODER_A can be enabled. PORT_x depends on the input of
		 * setup_virtual_dp_monitor; we can bind DPLL0 to any PORT_x,
		 * so we fix it to DPLL0 here.
		 * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode
		 */
		vgpu_vreg_t(vgpu, DPLL_CTRL1) =
			DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0);
		vgpu_vreg_t(vgpu, DPLL_CTRL1) |=
			DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0);
		vgpu_vreg_t(vgpu, LCPLL1_CTL) =
			LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK;
		vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0);
		/*
		 * Golden M/N are calculated based on:
		 * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
		 * DP link clk 1620 MHz and non-constant_n.
		 * TODO: calculate DP link symbol clk and stream clk m/n.
		 */
		vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64);
		vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e;
		vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000;
		vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e;
		vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
		vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
			~DPLL_CTRL2_DDI_CLK_OFF(PORT_B);
		vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
			DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B);
		vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
			DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			TRANS_DDI_PORT_MASK);
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			(PORT_B << TRANS_DDI_PORT_SHIFT) |
			TRANS_DDI_FUNC_ENABLE);
		if (IS_BROADWELL(dev_priv)) {
			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &=
				~PORT_CLK_SEL_MASK;
			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |=
				PORT_CLK_SEL_LCPLL_810;
		}
		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
		vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
			~DPLL_CTRL2_DDI_CLK_OFF(PORT_C);
		vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
			DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C);
		vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
			DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			TRANS_DDI_PORT_MASK);
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			(PORT_C << TRANS_DDI_PORT_SHIFT) |
			TRANS_DDI_FUNC_ENABLE);
		if (IS_BROADWELL(dev_priv)) {
			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &=
				~PORT_CLK_SEL_MASK;
			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |=
				PORT_CLK_SEL_LCPLL_810;
		}
		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
		vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
			~DPLL_CTRL2_DDI_CLK_OFF(PORT_D);
		vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
			DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D);
		vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
			DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			TRANS_DDI_PORT_MASK);
		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			(PORT_D << TRANS_DDI_PORT_SHIFT) |
			TRANS_DDI_FUNC_ENABLE);
		if (IS_BROADWELL(dev_priv)) {
			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &=
				~PORT_CLK_SEL_MASK;
			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |=
				PORT_CLK_SEL_LCPLL_810;
		}
		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
	}

	if ((IS_SKYLAKE(dev_priv) ||
	     IS_KABYLAKE(dev_priv) ||
	     IS_COFFEELAKE(dev_priv) ||
	     IS_COMETLAKE(dev_priv)) &&
			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
		if (IS_BROADWELL(dev_priv))
			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
				GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
		else
			vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;

		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
	}

	/* Clear host CRT status, so the guest can't detect the host CRT. */
	if (IS_BROADWELL(dev_priv))
		vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;

	/* Disable Primary/Sprite/Cursor plane */
	for_each_pipe(display, pipe) {
		vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE;
		vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
		vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK;
		vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE;
	}

	vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE;
}

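/* Free the virtual EDID and DPCD blocks attached to a vGPU port. */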
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
{
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);

	kfree(port->edid);
	port->edid = NULL;

	kfree(port->dpcd);
	port->dpcd = NULL;
}

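/*
 * Per-vGPU vblank hrtimer callback: request vblank emulation service for
 * this vGPU and re-arm the timer for the next period.
 */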
static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
{
	struct intel_vgpu_vblank_timer *vblank_timer;
	struct intel_vgpu *vgpu;

	vblank_timer = container_of(data, struct intel_vgpu_vblank_timer, timer);
	vgpu = container_of(vblank_timer, struct intel_vgpu, vblank_timer);

	/* Set vblank emulation request per-vGPU bit */
	intel_gvt_request_service(vgpu->gvt,
				  INTEL_GVT_REQUEST_EMULATE_VBLANK + vgpu->id);
	hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
	return HRTIMER_RESTART;
}

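/*
 * Attach a virtual DP monitor to @port_num: allocate and fill the virtual
 * EDID/DPCD blocks, record the port type and default refresh rate, set up
 * the per-vGPU vblank hrtimer, and update the virtual display registers to
 * reflect the new monitor.
 */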
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
		int type, unsigned int resolution)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
	struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer;

	if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
		return -EINVAL;

	port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
	if (!port->edid)
		return -ENOMEM;

	port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
	if (!port->dpcd) {
		kfree(port->edid);
		return -ENOMEM;
	}

	memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
	       EDID_SIZE);
	port->edid->data_valid = true;

	memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
	port->dpcd->data_valid = true;
	port->dpcd->data[DP_SINK_COUNT] = 0x1;
	port->type = type;
	port->id = resolution;
	port->vrefresh_k = GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC;
	vgpu->display.port_num = port_num;

	/* Init hrtimer based on default refresh rate */
	hrtimer_setup(&vblank_timer->timer, vblank_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
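	/*
	 * With the default 60 Hz refresh rate, vrefresh_k is 60000 and the
	 * period below works out to 10^9 * 10^3 / 60000 ns, i.e. ~16666667 ns
	 * per emulated vblank.
	 */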
	vblank_timer->vrefresh_k = port->vrefresh_k;
	vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);

	emulate_monitor_status_change(vgpu);

	return 0;
}

/**
 * vgpu_update_vblank_emulation - Update per-vGPU vblank_timer
 * @vgpu: vGPU operated
 * @turnon: Turn ON/OFF vblank_timer
 *
 * This function is used to turn on/off or update the per-vGPU vblank_timer
 * when TRANSCONF is enabled or disabled. The vblank_timer period is also
 * updated if the guest changed the refresh rate.
 *
 */
void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon)
|
2016-04-25 18:28:56 -04:00
|
|
|
{
|
drm/i915/gvt: Refactor GVT vblank emulator for vGPU virtual display
Current vblank emulator uses single hrtimer at 16ms period for all vGPUs,
which introduces three major issues:
- 16ms matches the refresh rate at 62.5Hz (instead of 60Hz) which
doesn't follow standard timing. This leads to some frame drop or glitch
issue during video playback. SW expects a vsync interval of 16.667ms or
higher precision for an accurate 60Hz refresh rate. However current
vblank emulator only works at 16ms.
- Doesn't respect the fact that with current virtual EDID timing set,
not all resolutions are running at 60Hz. For example, current virtual
EDID also supports refresh rate at 56Hz, 59.97Hz, 60Hz, 75Hz, etc.
- Current vblank emulator use single hrtimer for all vGPUs. Regardsless
the possibility that different guests could run in different
resolutions, all vsync interrupts are injected at 16ms interval with
same hrtimer.
Based on previous patch which decode guest expected refresh rate from
vreg, the vblank emulator refactor patch makes following changes:
- Change the vblank emulator hrtimer from gvt global to per-vGPU.
By doing this, each vGPU display can operates at different refresh
rates. Currently only one dislay is supported for each vGPU so per-vGPU
hrtimer is enough. If multiple displays are supported per-vGPU in
future, we can expand to per-PIPE further.
- Change the fixed hrtimer period from 16ms to dynamic based on vreg.
GVT is expected to emulate the HW as close as possible. So reflacting
the accurate vsync interrupt interval is more correct than fixed 16ms.
- Change the vblank timer period and start the timer on PIPECONF change.
The initial period is updated to 16666667 based on 60Hz refresh rate.
According to PRM, PIPECONF controls the timing generator of the
connected display on this pipe, so it's safe to stop hrtimer on
PIPECONF disabling, and re-start hrtimer at new period on enabling.
Other changes including:
- Move vblank_timer_fn from irq.c into display.c.
- Clean per-vGPU vblank timer at clean_display instead of clean_irq.
To run quick test, launch a web browser and goto URL: www.displayhz.com
The actual refresh rate from guest can now always match guest settings.
V2:
Rebase to 5.11.
Remove unused intel_gvt_clean_irq().
Simplify enable logic in update_vblank_emulation(). (zhenyu)
Loop all vGPU by idr when check all vblank timer. (zhenyu)
Signed-off-by: Colin Xu <colin.xu@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20210226044630.284269-1-colin.xu@intel.com
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
2021-02-26 12:46:30 +08:00
|
|
|
struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer;
|
|
|
|
struct intel_vgpu_port *port =
|
|
|
|
intel_vgpu_port(vgpu, vgpu->display.port_num);
|
|
|
|
|
|
|
|
if (turnon) {
|
|
|
|
/*
|
|
|
|
* Skip the re-enable if already active and vrefresh unchanged.
|
|
|
|
* Otherwise, stop timer if already active and restart with new
|
|
|
|
* period.
|
|
|
|
*/
|
|
|
|
if (vblank_timer->vrefresh_k != port->vrefresh_k ||
|
|
|
|
!hrtimer_active(&vblank_timer->timer)) {
|
|
|
|
/* Stop timer before start with new period if active */
|
|
|
|
if (hrtimer_active(&vblank_timer->timer))
|
|
|
|
hrtimer_cancel(&vblank_timer->timer);
|
|
|
|
|
|
|
|
/* Make sure new refresh rate updated to timer period */
|
|
|
|
vblank_timer->vrefresh_k = port->vrefresh_k;
|
|
|
|
vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);
|
|
|
|
hrtimer_start(&vblank_timer->timer,
|
|
|
|
ktime_add_ns(ktime_get(), vblank_timer->period),
|
|
|
|
HRTIMER_MODE_ABS);
|
2016-04-25 18:28:56 -04:00
|
|
|
}
|
	} else {
		/* Caller request to stop vblank */
		hrtimer_cancel(&vblank_timer->timer);
	}
}
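
The commit above moves vblank_timer_fn into display.c so that each vGPU re-arms
its own timer at its own period. A minimal sketch of such a per-vGPU callback,
assuming the hrtimer is embedded in struct intel_vgpu_vblank_timer as the code
above implies; the function name is illustrative and this is not the driver's
actual vblank_timer_fn:

static enum hrtimer_restart sample_vblank_timer_fn(struct hrtimer *timer)
{
	struct intel_vgpu_vblank_timer *vblank_timer =
		container_of(timer, struct intel_vgpu_vblank_timer, timer);

	/*
	 * ... request vblank emulation for the owning vGPU here, via
	 * whatever mechanism the driver uses (placeholder) ...
	 */

	/* Re-arm relative to now using the per-vGPU period computed above */
	hrtimer_forward_now(timer, ns_to_ktime(vblank_timer->period));
	return HRTIMER_RESTART;
}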

static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;
	struct intel_vgpu_irq *irq = &vgpu->irq;
	int vblank_event[] = {
		[PIPE_A] = PIPE_A_VBLANK,
		[PIPE_B] = PIPE_B_VBLANK,
		[PIPE_C] = PIPE_C_VBLANK,
	};
	int event;

	if (pipe < PIPE_A || pipe > PIPE_C)
		return;

	for_each_set_bit(event, irq->flip_done_event[pipe],
			 INTEL_GVT_EVENT_MAX) {
		clear_bit(event, irq->flip_done_event[pipe]);
		if (!pipe_is_enabled(vgpu, pipe))
			continue;

		intel_vgpu_trigger_virtual_event(vgpu, event);
	}

	if (pipe_is_enabled(vgpu, pipe)) {
		vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(display, pipe))++;
		intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
	}
}
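
Since the frame counter above is bumped exactly once per emulated vblank, the
guest-observed refresh rate tracks the per-vGPU timer period. A small sketch of
that relationship, again assuming vrefresh_k is in milli-Hz; the helper is
illustrative only:

/* Expected frame-counter advance over an interval of dt_ns nanoseconds:
 * dt_ns / period == dt_ns * vrefresh_k / 1e12. For dt_ns = 1s and 60Hz this
 * is 60 frames, which is what a tool like www.displayhz.com measures.
 */
static u64 sample_expected_frames(u64 dt_ns, u64 vrefresh_k)
{
	return div64_u64(dt_ns * vrefresh_k, (u64)NSEC_PER_SEC * MSEC_PER_SEC);
}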

void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_display *display = i915->display;
	int pipe;

drm/i915/gvt: Use vgpu_lock to protect per vgpu access
The patch set splits out 2 small locks from the original big gvt lock:
- vgpu_lock protects per-vGPU data and logic, especially the vGPU
trap emulation path.
- sched_lock protects the gvt scheduler structure, context schedule logic
and each vGPU's schedule data.
Use vgpu_lock to replace the gvt big lock. By doing this, the
mmio read/write trap path, vgpu virtual event emulation and other
vgpu-related processing are protected under the per-vGPU vgpu_lock.
v9:
- Change commit author since the patches are improved a lot compared
with the original version.
Original author: Pei Zhang <pei.zhang@intel.com>
- Rebase to latest gvt-staging.
v8:
- Correct coding and comment style.
- Rebase to latest gvt-staging.
v7:
- Remove gtt_lock since it is already protected by gvt_lock and vgpu_lock.
- Fix a typo in intel_gvt_deactivate_vgpu, unlock the wrong lock.
v6:
- Rebase to latest gvt-staging.
v5:
- Rebase to latest gvt-staging.
- intel_vgpu_page_track_handler should use vgpu_lock.
v4:
- Rebase to latest gvt-staging.
- Protect vgpu->active access with vgpu_lock.
- Do not wait for gpu idle in vgpu_lock.
v3: update to latest code base
v2: add gvt->lock in function gvt_check_vblank_emulation
Performance comparison on Kabylake platform.
- Configuration:
Host: Ubuntu 16.04.
Guest 1 & 2: Ubuntu 16.04.
glmark2 score comparison:
- Configuration:
Host: glxgears.
Guests: glmark2.
+--------------------------------+-----------------+
| Setup                          | glmark2 score   |
+--------------------------------+-----------------+
| unified lock, iommu=on         | 58~62 (avg. 60) |
+--------------------------------+-----------------+
| unified lock, iommu=igfx_off   | 57~61 (avg. 59) |
+--------------------------------+-----------------+
| per-logic lock, iommu=on       | 60~68 (avg. 64) |
+--------------------------------+-----------------+
| per-logic lock, iommu=igfx_off | 61~67 (avg. 64) |
+--------------------------------+-----------------+
lock_stat comparison:
- Configuration:
Stop lock stat immediately after boot up.
Boot 2 VM Guests.
Run glmark2 in guests.
Start perf lock_stat for 20 seconds and stop again.
- Legend: c - contentions; w - waittime-avg
+------------+-----------------+-----------+---------------+------------+
|            |     gvt_lock    |sched_lock |   vgpu_lock   |  gtt_lock  |
+ lock type; +-----------------+-----------+---------------+------------+
| iommu set  |   c   |    w    |  c |   w  |   c  |    w   |  c  |   w  |
+------------+-------+---------+----+------+------+--------+-----+------+
| unified;   | 20697 | 839     |N/A | N/A  | N/A  | N/A    | N/A | N/A  |
| on         |       |         |    |      |      |        |     |      |
+------------+-------+---------+----+------+------+--------+-----+------+
| unified;   | 21838 | 658.15  |N/A | N/A  | N/A  | N/A    | N/A | N/A  |
| igfx_off   |       |         |    |      |      |        |     |      |
+------------+-------+---------+----+------+------+--------+-----+------+
| per-logic; | 1553  | 1599.96 |9458|429.97| 5846 | 274.33 | 0   | 0.00 |
| on         |       |         |    |      |      |        |     |      |
+------------+-------+---------+----+------+------+--------+-----+------+
| per-logic; | 1911  | 1678.32 |8335|445.16| 5451 | 244.80 | 0   | 0.00 |
| igfx_off   |       |         |    |      |      |        |     |      |
+------------+-------+---------+----+------+------+--------+-----+------+
Signed-off-by: Pei Zhang <pei.zhang@intel.com>
Signed-off-by: Colin Xu <colin.xu@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
2018-05-19 12:28:54 +08:00

	mutex_lock(&vgpu->vgpu_lock);
	for_each_pipe(display, pipe)
		emulate_vblank_on_pipe(vgpu, pipe);
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
 * @vgpu: a vGPU
 * @connected: link state
 *
 * This function is used to trigger hotplug interrupt for vGPU
 *
 */
void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	/* TODO: add more platforms support */
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915)) {
		if (connected) {
			vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
				SFUSE_STRAP_DDID_DETECTED;
			vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
		} else {
			vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
				~SFUSE_STRAP_DDID_DETECTED;
			vgpu_vreg_t(vgpu, SDEISR) &= ~SDE_PORTD_HOTPLUG_CPT;
		}
		vgpu_vreg_t(vgpu, SDEIIR) |= SDE_PORTD_HOTPLUG_CPT;
		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
			PORTD_HOTPLUG_STATUS_MASK;
		intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
	} else if (IS_BROXTON(i915)) {
		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
			if (connected) {
				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
					GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
			} else {
				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
			}
			vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
				GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
				~PORTA_HOTPLUG_STATUS_MASK;
			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
				PORTA_HOTPLUG_LONG_DETECT;
			intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
		}
		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
			if (connected) {
				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
					GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
					SFUSE_STRAP_DDIB_DETECTED;
			} else {
				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
				vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
					~SFUSE_STRAP_DDIB_DETECTED;
			}
			vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
				GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
				~PORTB_HOTPLUG_STATUS_MASK;
			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
				PORTB_HOTPLUG_LONG_DETECT;
			intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
		}
		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
			if (connected) {
				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
					GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
					SFUSE_STRAP_DDIC_DETECTED;
			} else {
				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
				vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
					~SFUSE_STRAP_DDIC_DETECTED;
			}
			vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
				GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
				~PORTC_HOTPLUG_STATUS_MASK;
			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
				PORTC_HOTPLUG_LONG_DETECT;
			intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
		}
	}
}
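
Each Broxton branch above repeats the same five-step sequence per port. A
condensed sketch of that pattern for the ports with a DDI strap bit (B and C);
the helper name and its parameter list are illustrative only and do not exist
in the driver:

static void sample_bxt_port_hotplug(struct intel_vgpu *vgpu, bool connected,
				    u32 hpd_bit, u32 strap_bit,
				    u32 status_mask, u32 long_detect,
				    int event)
{
	/* 1-2: mirror the live link state in the ISR and the DDI strap */
	if (connected) {
		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= hpd_bit;
		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= strap_bit;
	} else {
		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~hpd_bit;
		vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~strap_bit;
	}
	/* 3: latch the hotplug event in the IIR */
	vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= hpd_bit;
	/* 4: report a long-pulse detection for the port */
	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~status_mask;
	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= long_detect;
	/* 5: inject the virtual hotplug interrupt into the guest */
	intel_vgpu_trigger_virtual_event(vgpu, event);
}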

/**
 * intel_vgpu_clean_display - clean vGPU virtual display emulation
 * @vgpu: a vGPU
 *
 * This function is used to clean up the vGPU virtual display emulation.
 *
 */
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv) ||
	    IS_COMETLAKE(dev_priv))
		clean_virtual_dp_monitor(vgpu, PORT_D);
	else
		clean_virtual_dp_monitor(vgpu, PORT_B);

	vgpu_update_vblank_emulation(vgpu, false);
}

/**
 * intel_vgpu_init_display - initialize vGPU virtual display emulation
 * @vgpu: a vGPU
 * @resolution: resolution index for intel_vgpu_edid
 *
 * This function is used to initialize the vGPU virtual display emulation.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

	intel_vgpu_init_i2c_edid(vgpu);

	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv) ||
	    IS_COMETLAKE(dev_priv))
		return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
						resolution);
	else
		return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B,
						resolution);
}

/**
 * intel_vgpu_reset_display - reset vGPU virtual display emulation
 * @vgpu: a vGPU
 *
 * This function is used to reset the vGPU virtual display emulation.
 *
 */
void intel_vgpu_reset_display(struct intel_vgpu *vgpu)
{
	emulate_monitor_status_change(vgpu);
}
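
Taken together, the three entry points above cover the virtual display life
cycle. A hypothetical caller sketch (the wrapper name and call sites are
assumptions for illustration; the actual callers live elsewhere in GVT):

static int sample_vgpu_display_lifecycle(struct intel_vgpu *vgpu, u64 resolution)
{
	int ret;

	/* Create the virtual EDID and DP monitor for this vGPU */
	ret = intel_vgpu_init_display(vgpu, resolution);
	if (ret)
		return ret;

	/* On vGPU reset, re-emulate the monitor status change */
	intel_vgpu_reset_display(vgpu);

	/* On destroy, tear down the monitor and stop the per-vGPU vblank timer */
	intel_vgpu_clean_display(vgpu);
	return 0;
}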
|