linux/drivers/gpu/drm/i915/display/intel_vrr.h

/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */
#ifndef __INTEL_VRR_H__
#define __INTEL_VRR_H__

#include <linux/types.h>

struct drm_connector_state;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_dsb;
struct intel_display;

bool intel_vrr_is_capable(struct intel_connector *connector);
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
bool intel_vrr_possible(const struct intel_crtc_state *crtc_state);
void intel_vrr_check_modeset(struct intel_atomic_state *state);
void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
                              struct drm_connector_state *conn_state);
void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state);
void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
void intel_vrr_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_send_push(struct intel_dsb *dsb,
                         const struct intel_crtc_state *crtc_state);
drm/i915/vrr: Check that the push send bit is clear after delayed vblank
2025-02-10 18:07:11 +02:00

Since we don't do mailbox updates, the push send bit should always clear
by the time the delayed vblank fires and the flip completes. Check for
that to make sure we haven't screwed up the sequencing/vblank evasion/etc.

On the DSB path we should be able to guarantee this since we don't have
to deal with any scheduler latencies and whatnot. I suppose unexpected
DMA/memory latencies might be the only thing that might trip us up here.

For the MMIO path we always have a non-zero chance that vblank evasion
fails (since we can't really guarantee anything about the scheduling
behaviour). That could trip up this check, but that seems fine since we
already print errors for other types of vblank evasion failures.

Should the CPU vblank evasion actually fail, then the push send bit can
still be set when the next commit happens. But both the DSB and MMIO
paths should handle that situation gracefully.

v2: Only check once instead of polling for two scanlines since we should
    now be guaranteed to be past the delayed vblank. Also check in the
    MMIO path for good measure.
v3: Skip the push send check when VRR is disabled. With joiner the
    secondary pipe's DSBs don't have access to the transcoder registers,
    so doing this check there triggers a response timeout error on the
    DSB. VRR is not currently allowed when using joiner, so this will
    prevent the bogus register access.

Reviewed-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250210160711.24010-1-ville.syrjala@linux.intel.com

(An illustrative sketch of the MMIO-path check described here follows after the header.)
void intel_vrr_check_push_sent(struct intel_dsb *dsb,
                               const struct intel_crtc_state *crtc_state);
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state);
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
void intel_vrr_get_config(struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
#endif /* __INTEL_VRR_H__ */
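
As referenced in the commit message above, the following is a minimal sketch of the
MMIO-path push-send check, built only on intel_vrr_is_push_sent() from this header.
It is an illustration of the idea, not the actual intel_vrr.c implementation (which
also covers the DSB path via a register poll); the crtc_state->vrr.enable flag,
DRM_ERROR(), the vrr_sanity_check_push_sent() name, and the includes are assumptions
made for the sketch.

/*
 * Illustrative sketch only (not the intel_vrr.c code): a one-shot check,
 * run after the delayed vblank, that the VRR push send bit has cleared.
 */
#include <drm/drm_print.h>              /* DRM_ERROR() */

#include "intel_display_types.h"        /* struct intel_crtc_state (assumed) */
#include "intel_vrr.h"

static void vrr_sanity_check_push_sent(const struct intel_crtc_state *crtc_state)
{
        /*
         * Skip the check when VRR is disabled, e.g. with joiner where the
         * secondary pipe's DSBs cannot reach the transcoder registers.
         */
        if (!crtc_state->vrr.enable)
                return;

        /*
         * No polling: since mailbox updates are not used, the push send bit
         * must already have self-cleared by the time the delayed vblank has
         * fired and the flip has completed.
         */
        if (intel_vrr_is_push_sent(crtc_state))
                DRM_ERROR("VRR push send bit still set after delayed vblank\n");
}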