drm/amd/display: Introduce DML2
DC is transitioning from DML to DML2, and this commit introduces all the required changes for some of the already available ASICs and adds the code infrastructure needed to support new ASICs under DML2. DML2 is also generated code that provides better mode verification and programming models for software/hardware, and it enables a better way to create validation tools. This version is an intermediate step toward the complete transition to DML2.

Changes since V1:
- Alex: Fix typos

Changes since V2:
- Update DC includes

Changes since V3:
- Fix 32-bit compilation issues on x86

Changes since V4:
- Avoid compiling DML2 on unsupported 32-bit architectures
- Update commit message

Co-developed-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Co-developed-by: Roman Li <roman.li@amd.com>
Signed-off-by: Roman Li <roman.li@amd.com>
Signed-off-by: Qingqing Zhuo <Qingqing.Zhuo@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6e2c4941ce
commit 7966f319c6

36 changed files with 19572 additions and 4 deletions
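For orientation before the hunks: the integration pattern this commit repeats across dc.c and the dcn32/dcn321 resource files is a runtime switch on the new `dc->debug.using_dml2` flag. When the flag is set, a `dml2_context` is created alongside each `dc_state`, and bandwidth validation is routed through `dml2_validate()` instead of the legacy DML path. Below is a minimal sketch of that control flow, condensed from the hunks that follow; allocation details and error handling are simplified here and are not taken verbatim from the patch.

```c
/* Condensed illustration of the DML2 gating introduced by this patch.
 * Not a complete implementation: only the control flow is shown, and the
 * allocation details are simplified relative to the real dc_create_state().
 */
struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!context)
		return NULL;

	init_state(dc, context);

#ifdef CONFIG_DRM_AMD_DC_FP
	/* DML2 state is only built when the new debug switch is enabled. */
	if (dc->debug.using_dml2)
		dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
#endif

	kref_init(&context->refcount);
	return context;
}

/* DCN3.2 validation now dispatches between DML2 and the legacy DML1 path. */
bool dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context,
			      bool fast_validate)
{
	if (dc->debug.using_dml2)
		return dml2_validate(dc, context, fast_validate);

	return dml1_validate(dc, context, fast_validate);
}
```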
@@ -28,8 +28,8 @@ ifdef CONFIG_DRM_AMD_DC_FP
KCOV_INSTRUMENT := n

DC_LIBS += dcn20
DC_LIBS += dcn10
DC_LIBS += dcn20
DC_LIBS += dcn21
DC_LIBS += dcn201
DC_LIBS += dcn30
@@ -44,6 +44,7 @@ DC_LIBS += dcn32
DC_LIBS += dcn321
DC_LIBS += dcn35
DC_LIBS += dml
DC_LIBS += dml2
endif

DC_LIBS += dce120
@@ -77,6 +77,8 @@
#include "hw_sequencer_private.h"

#include "dml2/dml2_internal_types.h"

#include "dce/dmub_outbox.h"

#define CTX \
@@ -2176,6 +2178,11 @@ struct dc_state *dc_create_state(struct dc *dc)
init_state(dc, context);

#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2) {
dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
}
#endif
kref_init(&context->refcount);

return context;
@@ -2185,11 +2192,25 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
#ifdef CONFIG_DRM_AMD_DC_FP
struct dml2_context *dml2 = NULL;
#endif

if (!new_ctx)
return NULL;
memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

#ifdef CONFIG_DRM_AMD_DC_FP
if (new_ctx->bw_ctx.dml2) {
dml2 = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
if (!dml2)
return NULL;

memcpy(dml2, src_ctx->bw_ctx.dml2, sizeof(struct dml2_context));
new_ctx->bw_ctx.dml2 = dml2;
}
#endif

for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

@@ -2228,6 +2249,12 @@ static void dc_state_free(struct kref *kref)
{
struct dc_state *context = container_of(kref, struct dc_state, refcount);
dc_resource_state_destruct(context);

#ifdef CONFIG_DRM_AMD_DC_FP
dml2_destroy(context->bw_ctx.dml2);
context->bw_ctx.dml2 = 0;
#endif

kvfree(context);
}

@@ -4679,6 +4706,9 @@ bool dc_set_power_state(
{
struct kref refcount;
struct display_mode_lib *dml;
#ifdef CONFIG_DRM_AMD_DC_FP
struct dml2_context *dml2 = NULL;
#endif

if (!dc->current_state)
return true;

@@ -4698,6 +4728,10 @@ bool dc_set_power_state(
break;
default:
#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2)
dml2 = dc->current_state->bw_ctx.dml2;
#endif
ASSERT(dc->current_state->stream_count == 0);
/* Zero out the current context so that on resume we start with
* clean state, and dc hw programming optimizations will not
@@ -4724,6 +4758,11 @@ bool dc_set_power_state(
kfree(dml);

#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2)
dc->current_state->bw_ctx.dml2 = dml2;
#endif

break;
}
@@ -41,6 +41,7 @@
#include "dpcd_defs.h"
#include "link_enc_cfg.h"
#include "link.h"
#include "clk_mgr.h"
#include "virtual/virtual_link_hwss.h"
#include "link/hwss/link_hwss_dio.h"
#include "link/hwss/link_hwss_dpia.h"
@@ -86,6 +87,8 @@
dc->ctx->logger
#define DC_LOGGER_INIT(logger)

#include "dml2/dml2_wrapper.h"

#define UNABLE_TO_SPLIT -1

enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
@@ -318,6 +321,10 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
if ((res_pool->hubbub->funcs->get_dchub_ref_freq))
res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else
ASSERT_CRITICAL(false);
}
@@ -4358,9 +4365,22 @@ void dc_resource_state_copy_construct(
{
int i, j;
struct kref refcount = dst_ctx->refcount;
#ifdef CONFIG_DRM_AMD_DC_FP
struct dml2_context *dml2 = NULL;

// Need to preserve allocated dml2 context
if (src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
dml2 = dst_ctx->bw_ctx.dml2;
#endif

*dst_ctx = *src_ctx;

#ifdef CONFIG_DRM_AMD_DC_FP
// Preserve allocated dml2 context
if (src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
dst_ctx->bw_ctx.dml2 = dml2;
#endif

for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
@@ -40,6 +40,8 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"

#include "dml2/dml2_wrapper.h"

struct abm_save_restore;

/* forward declaration */
@@ -942,6 +944,7 @@ struct dc_debug_options {
bool dml_disallow_alternate_prefetch_modes;
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool using_dml2;
bool enable_single_display_2to1_odm_policy;
bool enable_double_buffered_dsc_pg_support;
bool enable_dp_dig_pixel_rate_div_policy;
@@ -1049,6 +1052,8 @@ struct dc {
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
} update_bw_bounding_box;
} scratch;

struct dml2_configuration_options dml2_options;
};

enum frame_buffer_mode {
@@ -89,6 +89,8 @@
#include "dcn20/dcn20_vmid.h"
#include "dml/dcn32/dcn32_fpu.h"

#include "dml2/dml2_wrapper.h"

#define DC_LOGGER_INIT(logger)

enum dcn32_clk_src_array_id {
@@ -714,6 +716,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.use_max_lb = true,
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.using_dml2 = false,
.enable_single_display_2to1_odm_policy = true,

/* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/
@@ -1805,9 +1808,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
}
}

bool dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate)
{
bool out = false;

@@ -1885,6 +1886,19 @@ validate_out:
return out;
}

bool dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
bool out = false;

if (dc->debug.using_dml2)
out = dml2_validate(dc, context, fast_validate);
else
out = dml1_validate(dc, context, fast_validate);
return out;
}

int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -2422,6 +2436,47 @@ static bool dcn32_resource_construct(
pool->base.oem_device = NULL;
}

dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;

dc->dml2_options.callbacks.dc = dc;
dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;

dc->dml2_options.svp_pstate.callbacks.dc = dc;
dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;

dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
dc->dml2_options.svp_pstate.subvp_pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
dc->dml2_options.svp_pstate.subvp_swath_height_margin_lines = dc->caps.subvp_swath_height_margin_lines;

dc->dml2_options.svp_pstate.force_disable_subvp = dc->debug.force_disable_subvp;
dc->dml2_options.svp_pstate.force_enable_subvp = dc->debug.force_subvp_mclk_switch;

dc->dml2_options.mall_cfg.cache_line_size_bytes = dc->caps.cache_line_size;
dc->dml2_options.mall_cfg.cache_num_ways = dc->caps.cache_num_ways;
dc->dml2_options.mall_cfg.max_cab_allocation_bytes = dc->caps.max_cab_allocation_bytes;
dc->dml2_options.mall_cfg.mblk_height_4bpe_pixels = DCN3_2_MBLK_HEIGHT_4BPE;
dc->dml2_options.mall_cfg.mblk_height_8bpe_pixels = DCN3_2_MBLK_HEIGHT_8BPE;
dc->dml2_options.mall_cfg.mblk_size_bytes = DCN3_2_MALL_MBLK_SIZE_BYTES;
dc->dml2_options.mall_cfg.mblk_width_pixels = DCN3_2_MBLK_WIDTH;

dc->dml2_options.max_segments_per_hubp = 18;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;

if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;
@@ -1989,6 +1989,47 @@ static bool dcn321_resource_construct(
pool->base.oem_device = NULL;
}

dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
dc->dml2_options.use_native_pstate_optimization = false;
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;

dc->dml2_options.callbacks.dc = dc;
dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;

dc->dml2_options.svp_pstate.callbacks.dc = dc;
dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;

dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
dc->dml2_options.svp_pstate.subvp_pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
dc->dml2_options.svp_pstate.subvp_swath_height_margin_lines = dc->caps.subvp_swath_height_margin_lines;

dc->dml2_options.svp_pstate.force_disable_subvp = dc->debug.force_disable_subvp;
dc->dml2_options.svp_pstate.force_enable_subvp = dc->debug.force_subvp_mclk_switch;

dc->dml2_options.mall_cfg.cache_line_size_bytes = dc->caps.cache_line_size;
dc->dml2_options.mall_cfg.cache_num_ways = dc->caps.cache_num_ways;
dc->dml2_options.mall_cfg.max_cab_allocation_bytes = dc->caps.max_cab_allocation_bytes;
dc->dml2_options.mall_cfg.mblk_height_4bpe_pixels = DCN3_2_MBLK_HEIGHT_4BPE;
dc->dml2_options.mall_cfg.mblk_height_8bpe_pixels = DCN3_2_MBLK_HEIGHT_8BPE;
dc->dml2_options.mall_cfg.mblk_size_bytes = DCN3_2_MALL_MBLK_SIZE_BYTES;
dc->dml2_options.mall_cfg.mblk_width_pixels = DCN3_2_MBLK_WIDTH;

dc->dml2_options.max_segments_per_hubp = 18;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;

return true;

create_fail:
@@ -2957,12 +2957,14 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
/* Override from passed dc->bb_overrides if available*/
if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
&& dc->bb_overrides.sr_exit_time_ns) {
dc->dml2_options.bbox_overrides.sr_exit_latency_us =
dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
}

if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000)
!= dc->bb_overrides.sr_enter_plus_exit_time_ns
&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
dcn3_2_soc.sr_enter_plus_exit_time_us =
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}

@@ -2970,12 +2972,14 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
dc->dml2_options.bbox_overrides.urgent_latency_us =
dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}

if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
!= dc->bb_overrides.dram_clock_change_latency_ns
&& dc->bb_overrides.dram_clock_change_latency_ns) {
dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
dcn3_2_soc.dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}

@@ -2983,6 +2987,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
!= dc->bb_overrides.fclk_clock_change_latency_ns
&& dc->bb_overrides.fclk_clock_change_latency_ns) {
dc->dml2_options.bbox_overrides.fclk_change_latency_us =
dcn3_2_soc.fclk_change_latency_us =
dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
}

@@ -3000,14 +3005,17 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
dcn3_2_soc.dram_clock_change_latency_us =
bb_info.dram_clock_change_latency_100ns * 10;

if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
dcn3_2_soc.sr_enter_plus_exit_time_us =
bb_info.dram_sr_enter_exit_latency_100ns * 10;

if (bb_info.dram_sr_exit_latency_100ns > 0)
dc->dml2_options.bbox_overrides.sr_exit_latency_us =
dcn3_2_soc.sr_exit_time_us =
bb_info.dram_sr_exit_latency_100ns * 10;
}

@@ -3015,12 +3023,14 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
/* Override from VBIOS for num_chan */
if (dc->ctx->dc_bios->vram_info.num_chans) {
dc->dml2_options.bbox_overrides.dram_num_chan =
dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
}

if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;

/* DML DSC delay factor workaround */
@@ -3031,6 +3041,10 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;

/* Overrides Clock levelsfrom CLK Mgr table entries as reported by PM FW */
if (bw_params->clk_table.entries[0].memclk_mhz) {

@@ -3186,6 +3200,72 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
}

if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
unsigned int i = 0;

dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
}
}
}
}

void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
@@ -616,12 +616,14 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
/* Override from passed dc->bb_overrides if available*/
if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
&& dc->bb_overrides.sr_exit_time_ns) {
dc->dml2_options.bbox_overrides.sr_exit_latency_us =
dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
}

if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000)
!= dc->bb_overrides.sr_enter_plus_exit_time_ns
&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
dcn3_21_soc.sr_enter_plus_exit_time_us =
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}

@@ -629,12 +631,14 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
dc->dml2_options.bbox_overrides.urgent_latency_us =
dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}

if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000)
!= dc->bb_overrides.dram_clock_change_latency_ns
&& dc->bb_overrides.dram_clock_change_latency_ns) {
dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
dcn3_21_soc.dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}

@@ -642,6 +646,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
!= dc->bb_overrides.fclk_clock_change_latency_ns
&& dc->bb_overrides.fclk_clock_change_latency_ns) {
dc->dml2_options.bbox_overrides.fclk_change_latency_us =
dcn3_21_soc.fclk_change_latency_us =
dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
}

@@ -659,14 +664,17 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
dcn3_21_soc.dram_clock_change_latency_us =
bb_info.dram_clock_change_latency_100ns * 10;

if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
dcn3_21_soc.sr_enter_plus_exit_time_us =
bb_info.dram_sr_enter_exit_latency_100ns * 10;

if (bb_info.dram_sr_exit_latency_100ns > 0)
dc->dml2_options.bbox_overrides.sr_exit_latency_us =
dcn3_21_soc.sr_exit_time_us =
bb_info.dram_sr_exit_latency_100ns * 10;
}

@@ -674,12 +682,14 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
/* Override from VBIOS for num_chan */
if (dc->ctx->dc_bios->vram_info.num_chans) {
dc->dml2_options.bbox_overrides.dram_num_chan =
dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
}

if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;

/* DML DSC delay factor workaround */
@@ -690,6 +700,10 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
dcn3_21_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;

/* Overrides Clock levelsfrom CLK Mgr table entries as reported by PM FW */
if (dc->debug.use_legacy_soc_bb_mechanism) {

@@ -836,5 +850,72 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);

if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
unsigned int i = 0;

dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;

dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
}

for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
}
}
}
}
drivers/gpu/drm/amd/display/dc/dml2/Makefile (new file, 91 lines)

@@ -0,0 +1,91 @@
# SPDX-License-Identifier: MIT */
#
# Copyright 2023 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# makefile for dml2

ifdef CONFIG_X86
dml2_ccflags-$(CONFIG_CC_IS_GCC) := -mhard-float
dml2_ccflags := $(dml2_ccflags-y) -msse
endif

ifdef CONFIG_PPC64
dml2_ccflags := -mhard-float -maltivec
endif

ifdef CONFIG_ARM64
dml2_rcflags := -mgeneral-regs-only
endif

ifdef CONFIG_LOONGARCH
dml2_ccflags := -mfpu=64
dml2_rcflags := -msoft-float
endif

ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
endif

ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
dml2_ccflags += -mpreferred-stack-boundary=4
else
dml2_ccflags += -msse2
endif
endif

ifneq ($(CONFIG_FRAME_WARN),0)
frame_warn_flag := -Wframe-larger-than=2048
endif

CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) -Wframe-larger-than=2048
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags)

CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags)

DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
dml_display_rq_dlg_calc.o

AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))

AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h (new file, 92 lines)

@@ -0,0 +1,92 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __CMNTYPES_H__
|
||||
#define __CMNTYPES_H__
|
||||
|
||||
#ifdef __GNUC__
|
||||
#if __GNUC__ == 4 && __GNUC_MINOR__ > 7
|
||||
typedef unsigned int uint;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
typedef signed char int8, *pint8;
|
||||
typedef signed short int16, *pint16;
|
||||
typedef signed int int32, *pint32;
|
||||
typedef signed int64, *pint64;
|
||||
|
||||
typedef unsigned char uint8, *puint8;
|
||||
typedef unsigned short uint16, *puint16;
|
||||
typedef unsigned int uint32, *puint32;
|
||||
typedef unsigned uint64, *puint64;
|
||||
|
||||
typedef unsigned long int ulong;
|
||||
typedef unsigned char uchar;
|
||||
typedef unsigned int uint;
|
||||
|
||||
typedef void *pvoid;
|
||||
typedef char *pchar;
|
||||
typedef const void *const_pvoid;
|
||||
typedef const char *const_pchar;
|
||||
|
||||
typedef struct rgba_struct {
|
||||
uint8 a;
|
||||
uint8 r;
|
||||
uint8 g;
|
||||
uint8 b;
|
||||
} rgba_t;
|
||||
|
||||
typedef struct {
|
||||
uint8 blue;
|
||||
uint8 green;
|
||||
uint8 red;
|
||||
uint8 alpha;
|
||||
} gen_color_t;
|
||||
|
||||
typedef union {
|
||||
uint32 val;
|
||||
gen_color_t f;
|
||||
} gen_color_u;
|
||||
|
||||
//
|
||||
// Types to make it easy to get or set the bits of a float/double.
|
||||
// Avoids automatic casting from int to float and back.
|
||||
//
|
||||
#if 0
|
||||
typedef union {
|
||||
uint32 i;
|
||||
float f;
|
||||
} uintfloat32;
|
||||
|
||||
typedef union {
|
||||
uint64 i;
|
||||
double f;
|
||||
} uintfloat64;
|
||||
|
||||
#ifndef UNREFERENCED_PARAMETER
|
||||
#define UNREFERENCED_PARAMETER(x) x = x
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif //__CMNTYPES_H__
|
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c (new file, 10275 lines)
File diff suppressed because it is too large
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h (new file, 199 lines)

@@ -0,0 +1,199 @@
|
|||
/*
|
||||
* Copyright 2022 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DISPLAY_MODE_CORE_H__
|
||||
#define __DISPLAY_MODE_CORE_H__
|
||||
|
||||
#include "display_mode_core_structs.h"
|
||||
|
||||
struct display_mode_lib_st;
|
||||
|
||||
dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib);
|
||||
void dml_core_mode_support_partial(struct display_mode_lib_st *mode_lib);
|
||||
void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struct dml_clk_cfg_st *clk_cfg);
|
||||
|
||||
void dml_core_get_row_heights(
|
||||
dml_uint_t *dpte_row_height,
|
||||
dml_uint_t *meta_row_height,
|
||||
const struct display_mode_lib_st *mode_lib,
|
||||
dml_bool_t is_plane1,
|
||||
enum dml_source_format_class SourcePixelFormat,
|
||||
enum dml_swizzle_mode SurfaceTiling,
|
||||
enum dml_rotation_angle ScanDirection,
|
||||
dml_uint_t pitch,
|
||||
dml_uint_t GPUVMMinPageSizeKBytes);
|
||||
|
||||
dml_float_t dml_get_return_bw_mbps_vm_only(
|
||||
const struct soc_bounding_box_st *soc,
|
||||
dml_bool_t use_ideal_dram_bw_strobe,
|
||||
dml_bool_t HostVMEnable,
|
||||
dml_float_t DCFCLK,
|
||||
dml_float_t FabricClock,
|
||||
dml_float_t DRAMSpeed);
|
||||
|
||||
dml_float_t dml_get_return_bw_mbps(
|
||||
const struct soc_bounding_box_st *soc,
|
||||
dml_bool_t use_ideal_dram_bw_strobe,
|
||||
dml_bool_t HostVMEnable,
|
||||
dml_float_t DCFCLK,
|
||||
dml_float_t FabricClock,
|
||||
dml_float_t DRAMSpeed);
|
||||
|
||||
dml_bool_t dml_mode_support(
|
||||
struct display_mode_lib_st *mode_lib,
|
||||
dml_uint_t state_idx,
|
||||
const struct dml_display_cfg_st *display_cfg);
|
||||
|
||||
dml_bool_t dml_mode_programming(
|
||||
struct display_mode_lib_st *mode_lib,
|
||||
dml_uint_t state_idx,
|
||||
const struct dml_display_cfg_st *display_cfg,
|
||||
bool call_standalone);
|
||||
|
||||
dml_uint_t dml_mode_support_ex(
|
||||
struct dml_mode_support_ex_params_st *in_out_params);
|
||||
|
||||
dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uint_t pipe_idx);
|
||||
|
||||
#define dml_get_per_surface_var_decl(variable, type) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx)
|
||||
#define dml_get_var_decl(var, type) type dml_get_##var(struct display_mode_lib_st *mode_lib)
|
||||
|
||||
dml_get_var_decl(wm_urgent, dml_float_t);
|
||||
dml_get_var_decl(wm_stutter_exit, dml_float_t);
|
||||
dml_get_var_decl(wm_stutter_enter_exit, dml_float_t);
|
||||
dml_get_var_decl(wm_memory_trip, dml_float_t);
|
||||
dml_get_var_decl(wm_dram_clock_change, dml_float_t);
|
||||
dml_get_var_decl(urgent_latency, dml_float_t);
|
||||
dml_get_var_decl(clk_dcf_deepsleep, dml_float_t);
|
||||
dml_get_var_decl(wm_fclk_change, dml_float_t);
|
||||
dml_get_var_decl(wm_usr_retraining, dml_float_t);
|
||||
dml_get_var_decl(urgent_latency, dml_float_t);
|
||||
|
||||
dml_get_var_decl(wm_writeback_dram_clock_change, dml_float_t);
|
||||
dml_get_var_decl(stutter_efficiency_no_vblank, dml_float_t);
|
||||
dml_get_var_decl(stutter_efficiency, dml_float_t);
|
||||
dml_get_var_decl(stutter_efficiency_z8, dml_float_t);
|
||||
dml_get_var_decl(stutter_num_bursts_z8, dml_float_t);
|
||||
dml_get_var_decl(stutter_period, dml_float_t);
|
||||
dml_get_var_decl(stutter_efficiency_z8_bestcase, dml_float_t);
|
||||
dml_get_var_decl(stutter_num_bursts_z8_bestcase, dml_float_t);
|
||||
dml_get_var_decl(stutter_period_bestcase, dml_float_t);
|
||||
dml_get_var_decl(urgent_latency, dml_float_t);
|
||||
dml_get_var_decl(urgent_extra_latency, dml_float_t);
|
||||
dml_get_var_decl(nonurgent_latency, dml_float_t);
|
||||
dml_get_var_decl(dispclk_calculated, dml_float_t);
|
||||
dml_get_var_decl(total_data_read_bw, dml_float_t);
|
||||
dml_get_var_decl(return_bw, dml_float_t);
|
||||
dml_get_var_decl(tcalc, dml_float_t);
|
||||
dml_get_var_decl(fraction_of_urgent_bandwidth, dml_float_t);
|
||||
dml_get_var_decl(fraction_of_urgent_bandwidth_imm_flip, dml_float_t);
|
||||
dml_get_var_decl(comp_buffer_size_kbytes, dml_uint_t);
|
||||
dml_get_var_decl(pixel_chunk_size_in_kbyte, dml_uint_t);
|
||||
dml_get_var_decl(alpha_pixel_chunk_size_in_kbyte, dml_uint_t);
|
||||
dml_get_var_decl(meta_chunk_size_in_kbyte, dml_uint_t);
|
||||
dml_get_var_decl(min_pixel_chunk_size_in_byte, dml_uint_t);
|
||||
dml_get_var_decl(min_meta_chunk_size_in_byte, dml_uint_t);
|
||||
dml_get_var_decl(total_immediate_flip_bytes, dml_uint_t);
|
||||
|
||||
dml_get_per_surface_var_decl(dsc_delay, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dppclk_calculated, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dscclk_calculated, dml_float_t);
|
||||
dml_get_per_surface_var_decl(min_ttu_vblank_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(vratio_prefetch_l, dml_float_t);
|
||||
dml_get_per_surface_var_decl(vratio_prefetch_c, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dst_x_after_scaler, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dst_y_after_scaler, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dst_y_per_vm_vblank, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dst_y_per_row_vblank, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dst_y_prefetch, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dst_y_per_vm_flip, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dst_y_per_row_flip, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dst_y_per_pte_row_nom_l, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dst_y_per_pte_row_nom_c, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dst_y_per_meta_row_nom_l, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dst_y_per_meta_row_nom_c, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_vm_group_vblank_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_vm_group_flip_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_vm_req_vblank_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_vm_req_flip_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_vm_dmdata_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(dmdata_dl_delta_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_line_delivery_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_line_delivery_c_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_line_delivery_pre_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_line_delivery_pre_c_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_req_delivery_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_req_delivery_c_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_req_delivery_pre_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_req_delivery_pre_c_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_cursor_req_delivery_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_cursor_req_delivery_pre_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_meta_chunk_nom_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_meta_chunk_nom_c_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_meta_chunk_vblank_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_meta_chunk_vblank_c_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_meta_chunk_flip_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_meta_chunk_flip_c_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_pte_group_nom_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_pte_group_nom_c_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_pte_group_vblank_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_pte_group_vblank_c_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_pte_group_flip_l_in_us, dml_float_t);
|
||||
dml_get_per_surface_var_decl(refcyc_per_pte_group_flip_c_in_us, dml_float_t);
|
||||
|
||||
dml_get_per_surface_var_decl(dpte_group_size_in_bytes, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(vm_group_size_in_bytes, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(swath_height_l, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(swath_height_c, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dpte_row_height_l, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dpte_row_height_c, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dpte_row_height_linear_l, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dpte_row_height_linear_c, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(meta_row_height_l, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(meta_row_height_c, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(vstartup_calculated, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(vupdate_offset, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(vupdate_width, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(vready_offset, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(vready_at_or_after_vsync, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(min_dst_y_next_start, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(det_stored_buffer_size_l_bytes, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(det_stored_buffer_size_c_bytes, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(use_mall_for_static_screen, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(surface_size_for_mall, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dcc_max_uncompressed_block_l, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dcc_max_uncompressed_block_c, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dcc_max_compressed_block_l, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dcc_max_compressed_block_c, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dcc_independent_block_l, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dcc_independent_block_c, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(max_active_dram_clock_change_latency_supported, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(pte_buffer_mode, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(bigk_fragment_size, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(dpte_bytes_per_row, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(meta_bytes_per_row, dml_uint_t);
|
||||
dml_get_per_surface_var_decl(det_buffer_size_kbytes, dml_uint_t);
|
||||
|
||||
#endif
|
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h (new file, 1968 lines)
File diff suppressed because it is too large
@@ -0,0 +1,75 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DISPLAY_MODE_LIB_DEFINES_H__
|
||||
#define __DISPLAY_MODE_LIB_DEFINES_H__
|
||||
|
||||
#define DCN_DML__DML_STANDALONE 1
|
||||
#define DCN_DML__DML_STANDALONE__1 1
|
||||
#define DCN_DML__PRESENT 1
|
||||
#define DCN_DML__PRESENT__1 1
|
||||
#define DCN_DML__NUM_PLANE 8
|
||||
#define DCN_DML__NUM_PLANE__8 1
|
||||
#define DCN_DML__NUM_CURSOR 1
|
||||
#define DCN_DML__NUM_CURSOR__1 1
|
||||
#define DCN_DML__NUM_PWR_STATE 30
|
||||
#define DCN_DML__NUM_PWR_STATE__30 1
|
||||
#define DCN_DML__VM_PRESENT 1
|
||||
#define DCN_DML__VM_PRESENT__1 1
|
||||
#define DCN_DML__HOST_VM_PRESENT 1
|
||||
#define DCN_DML__HOST_VM_PRESENT__1 1
|
||||
|
||||
#include "dml_depedencies.h"
|
||||
|
||||
#include "dml_logging.h"
|
||||
#include "dml_assert.h"
|
||||
|
||||
// To enable a lot of debug msg
|
||||
#define __DML_VBA_DEBUG__
|
||||
#define __DML_VBA_ENABLE_INLINE_CHECK_ 0
|
||||
#define __DML_VBA_MIN_VSTARTUP__ 9 //<brief At which vstartup the DML start to try if the mode can be supported
|
||||
#define __DML_ARB_TO_RET_DELAY__ 7 + 95 //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
|
||||
#define __DML_MIN_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation
|
||||
#define __DML_MAX_VRATIO_PRE__ 4.0 //<brief Prefetch schedule max vratio
|
||||
#define __DML_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
|
||||
#define __DML_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ 6.0 //<brief Prefetch schedule max vratio when enhance prefetch schedule acceleration is enabled and vstartup is earliest possible already
|
||||
#define __DML_NUM_PLANES__ DCN_DML__NUM_PLANE
|
||||
#define __DML_NUM_CURSORS__ DCN_DML__NUM_CURSOR
|
||||
#define __DML_DPP_INVALID__ 0
|
||||
#define __DML_PIPE_NO_PLANE__ 99
|
||||
|
||||
#define __DML_MAX_STATE_ARRAY_SIZE__ DCN_DML__NUM_PWR_STATE
|
||||
|
||||
// Compilation define
|
||||
#define __DML_DLL_EXPORT__
|
||||
|
||||
typedef int dml_int_t; // int is 32-bit in C/C++, but Integer datatype is 16-bit in VBA. this should map to Long in VBA
|
||||
typedef unsigned int dml_uint_t;
|
||||
typedef double dml_float_t;
|
||||
|
||||
// Note: bool is 8-bit in C/C++, but Boolean is 16-bit in VBA, use "short" in C/C++ DLL so the struct work when vba uses DLL
|
||||
// Or the VBA side don't use Boolean, just use "Byte", then C side can use bool
|
||||
typedef bool dml_bool_t;
|
||||
|
||||
#endif
|
drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c (new file, 796 lines)

@@ -0,0 +1,796 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "display_mode_util.h"
|
||||
|
||||
static dml_float_t _log(float in)
|
||||
{
|
||||
int * const exp_ptr = (int *)(&in);
|
||||
int x = *exp_ptr;
|
||||
const int log_2 = ((x >> 23) & 255) - 128;
|
||||
|
||||
x &= ~(255 << 23);
|
||||
x += 127 << 23;
|
||||
*exp_ptr = x;
|
||||
|
||||
in = ((-1.0f / 3) * in + 2) * in - 2.0f / 3;
|
||||
|
||||
return (in + log_2);
|
||||
}
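The _log() helper above avoids libm: it reads the IEEE-754 exponent field out of the float's bit pattern and approximates the mantissa's contribution with a quadratic, so its result is approximately log2(x); dml_log() and dml_log2() below only ever divide two such results, which converts the value to the requested base. A standalone userspace sketch (assumed, not part of the patch) comparing it against log2f():

#include <math.h>
#include <stdio.h>

/* Mirrors _log() from display_mode_util.c: exponent from the bit pattern plus a
 * quadratic fit of the mantissa, giving an approximation of log2(in). */
static float approx_log(float in)
{
	int *const exp_ptr = (int *)(&in);
	int x = *exp_ptr;
	const int log_2 = ((x >> 23) & 255) - 128;

	x &= ~(255 << 23);
	x += 127 << 23;
	*exp_ptr = x;

	in = ((-1.0f / 3) * in + 2) * in - 2.0f / 3;
	return in + log_2;
}

int main(void)
{
	/* Sample a few points and compare the ratio form used by dml_log2() with libm. */
	for (float v = 0.5f; v <= 1024.0f; v *= 7.3f)
		printf("x=%8.3f  approx_log2=%8.4f  log2f=%8.4f\n",
		       v, approx_log(v) / approx_log(2.0f), log2f(v));
	return 0;
}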
|
||||
|
||||
dml_bool_t dml_util_is_420(enum dml_source_format_class source_format)
|
||||
{
|
||||
dml_bool_t val = false;
|
||||
|
||||
switch (source_format) {
|
||||
case dml_444_16:
|
||||
val = 0;
|
||||
break;
|
||||
case dml_444_32:
|
||||
val = 0;
|
||||
break;
|
||||
case dml_444_64:
|
||||
val = 0;
|
||||
break;
|
||||
case dml_420_8:
|
||||
val = 1;
|
||||
break;
|
||||
case dml_420_10:
|
||||
val = 1;
|
||||
break;
|
||||
case dml_422_8:
|
||||
val = 0;
|
||||
break;
|
||||
case dml_422_10:
|
||||
val = 0;
|
||||
break;
|
||||
default:
|
||||
ASSERT(0);
|
||||
break;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline float dcn_bw_pow(float a, float exp)
|
||||
{
|
||||
float temp;
|
||||
/*ASSERT(exp == (int)exp);*/
|
||||
if ((int)exp == 0)
|
||||
return 1;
|
||||
temp = dcn_bw_pow(a, (int)(exp / 2));
|
||||
if (((int)exp % 2) == 0) {
|
||||
return temp * temp;
|
||||
} else {
|
||||
if ((int)exp > 0)
|
||||
return a * temp * temp;
|
||||
else
|
||||
return (temp * temp) / a;
|
||||
}
|
||||
}
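dcn_bw_pow() above is an exponentiation-by-squaring helper that truncates the exponent to an integer; dml_pow() further down is its public wrapper. Illustrative values (assumed inputs, results follow directly from the recursion):

/* dml_pow(2.0,  10) -> 1024.0   exponentiation by squaring, O(log exp) recursion depth
 * dml_pow(2.0,  -3) -> 0.125    negative exponents go through the division branch
 * dml_pow(7.5,   0) -> 1.0      exponent 0 terminates the recursion
 */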
|
||||
|
||||
static inline float dcn_bw_ceil2(const float arg, const float significance)
|
||||
{
|
||||
ASSERT(significance != 0);
|
||||
|
||||
return ((int)(arg / significance + 0.99999)) * significance;
|
||||
}
|
||||
|
||||
static inline float dcn_bw_floor2(const float arg, const float significance)
|
||||
{
|
||||
ASSERT(significance != 0);
|
||||
|
||||
return ((int)(arg / significance)) * significance;
|
||||
}
|
||||
|
||||
dml_float_t dml_ceil(dml_float_t x, dml_float_t granularity)
|
||||
{
|
||||
if (granularity == 0)
|
||||
return 0;
|
||||
//return (dml_float_t) (ceil(x / granularity) * granularity);
|
||||
return (dml_float_t)dcn_bw_ceil2(x, granularity);
|
||||
}
|
||||
|
||||
dml_float_t dml_floor(dml_float_t x, dml_float_t granularity)
|
||||
{
|
||||
if (granularity == 0)
|
||||
return 0;
|
||||
//return (dml_float_t) (floor(x / granularity) * granularity);
|
||||
return (dml_float_t)dcn_bw_floor2(x, granularity);
|
||||
}
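dml_ceil() and dml_floor() quantize a value to a multiple of the given granularity, and both return 0 when the granularity itself is 0 rather than asserting. Illustrative values (assumed inputs):

/* dml_ceil(12.3, 0.25)  -> 12.50   round up to the next multiple of 0.25
 * dml_floor(12.3, 0.25) -> 12.25   round down to the previous multiple of 0.25
 * dml_ceil(12.0, 0.25)  -> 12.00   exact multiples are returned unchanged
 * dml_ceil(12.3, 0.0)   ->  0.00   granularity 0 yields 0 instead of dividing by zero
 */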
|
||||
|
||||
dml_float_t dml_min(dml_float_t x, dml_float_t y)
|
||||
{
|
||||
if (x != x)
|
||||
return y;
|
||||
if (y != y)
|
||||
return x;
|
||||
if (x < y)
|
||||
return x;
|
||||
else
|
||||
return y;
|
||||
}
|
||||
|
||||
dml_float_t dml_min3(dml_float_t x, dml_float_t y, dml_float_t z)
|
||||
{
|
||||
return dml_min(dml_min(x, y), z);
|
||||
}
|
||||
|
||||
dml_float_t dml_min4(dml_float_t x, dml_float_t y, dml_float_t z, dml_float_t w)
|
||||
{
|
||||
return dml_min(dml_min(dml_min(x, y), z), w);
|
||||
}
|
||||
|
||||
dml_float_t dml_max(dml_float_t x, dml_float_t y)
|
||||
{
|
||||
if (x != x)
|
||||
return y;
|
||||
if (y != y)
|
||||
return x;
|
||||
if (x > y)
|
||||
return x;
|
||||
else
|
||||
return y;
|
||||
}
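The x != x comparisons above are NaN checks: dml_min() and dml_max() drop a NaN operand and return the other value instead of propagating it. Illustrative behavior (assumed inputs):

/* dml_min(NAN, 3.0) -> 3.0    the NaN operand is ignored
 * dml_max(2.0, NAN) -> 2.0    likewise for the second operand
 * dml_min(NAN, NAN) -> NaN    both operands invalid; the second one is returned
 */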
|
||||
dml_float_t dml_max3(dml_float_t x, dml_float_t y, dml_float_t z)
|
||||
{
|
||||
return dml_max(dml_max(x, y), z);
|
||||
}
|
||||
dml_float_t dml_max4(dml_float_t a, dml_float_t b, dml_float_t c, dml_float_t d)
|
||||
{
|
||||
return dml_max(dml_max(a, b), dml_max(c, d));
|
||||
}
|
||||
dml_float_t dml_max5(dml_float_t a, dml_float_t b, dml_float_t c, dml_float_t d, dml_float_t e)
|
||||
{
|
||||
return dml_max(dml_max4(a, b, c, d), e);
|
||||
}
|
||||
dml_float_t dml_log(dml_float_t x, dml_float_t base)
|
||||
{
|
||||
return (dml_float_t) (_log(x) / _log(base));
|
||||
}
|
||||
|
||||
dml_float_t dml_log2(dml_float_t x)
|
||||
{
|
||||
return (dml_float_t) (_log(x) / _log(2));
|
||||
}
|
||||
|
||||
dml_float_t dml_round(dml_float_t val, dml_bool_t bankers_rounding)
|
||||
{
|
||||
// if (bankers_rounding)
|
||||
// return (dml_float_t) lrint(val);
|
||||
// else {
|
||||
// return round(val);
|
||||
double round_pt = 0.5;
|
||||
double ceil = dml_ceil(val, 1);
|
||||
double floor = dml_floor(val, 1);
|
||||
|
||||
if (val - floor >= round_pt)
|
||||
return ceil;
|
||||
else
|
||||
return floor;
|
||||
// }
|
||||
}
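As the commented-out calls show, banker's rounding is not wired up in this implementation; dml_round() always rounds halves up regardless of the bankers_rounding argument. Illustrative values (assumed inputs):

/* dml_round(2.4, false) -> 2.0
 * dml_round(2.5, false) -> 3.0
 * dml_round(2.5, true)  -> 3.0   no round-half-to-even here; halves always go up
 */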
|
||||
|
||||
dml_float_t dml_pow(dml_float_t base, int exp)
|
||||
{
|
||||
return (dml_float_t) dcn_bw_pow(base, exp);
|
||||
}
|
||||
|
||||
dml_uint_t dml_round_to_multiple(dml_uint_t num, dml_uint_t multiple, dml_bool_t up)
|
||||
{
|
||||
dml_uint_t remainder;
|
||||
|
||||
if (multiple == 0)
|
||||
return num;
|
||||
|
||||
remainder = num % multiple;
|
||||
if (remainder == 0)
|
||||
return num;
|
||||
|
||||
if (up)
|
||||
return (num + multiple - remainder);
|
||||
else
|
||||
return (num - remainder);
|
||||
}
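dml_round_to_multiple() snaps an unsigned value to a multiple of the given quantum, in either direction. Illustrative values (assumed inputs):

/* dml_round_to_multiple(37, 8, true)  -> 40   round up to the next multiple of 8
 * dml_round_to_multiple(37, 8, false) -> 32   round down to the previous multiple of 8
 * dml_round_to_multiple(40, 8, true)  -> 40   exact multiples pass through
 * dml_round_to_multiple(37, 0, true)  -> 37   multiple == 0 returns the input unchanged
 */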
|
||||
|
||||
void dml_print_data_rq_regs_st(const dml_display_plane_rq_regs_st *rq_regs)
|
||||
{
|
||||
dml_print("DML: ===================================== \n");
|
||||
dml_print("DML: DISPLAY_PLANE_RQ_REGS_ST\n");
|
||||
dml_print("DML: chunk_size = 0x%x\n", rq_regs->chunk_size);
|
||||
dml_print("DML: min_chunk_size = 0x%x\n", rq_regs->min_chunk_size);
|
||||
dml_print("DML: meta_chunk_size = 0x%x\n", rq_regs->meta_chunk_size);
|
||||
dml_print("DML: min_meta_chunk_size = 0x%x\n", rq_regs->min_meta_chunk_size);
|
||||
dml_print("DML: dpte_group_size = 0x%x\n", rq_regs->dpte_group_size);
|
||||
dml_print("DML: mpte_group_size = 0x%x\n", rq_regs->mpte_group_size);
|
||||
dml_print("DML: swath_height = 0x%x\n", rq_regs->swath_height);
|
||||
dml_print("DML: pte_row_height_linear = 0x%x\n", rq_regs->pte_row_height_linear);
|
||||
dml_print("DML: ===================================== \n");
|
||||
}
|
||||
|
||||
void dml_print_rq_regs_st(const dml_display_rq_regs_st *rq_regs)
|
||||
{
|
||||
dml_print("DML: ===================================== \n");
|
||||
dml_print("DML: DISPLAY_RQ_REGS_ST\n");
|
||||
dml_print("DML: <LUMA> \n");
|
||||
dml_print_data_rq_regs_st(&rq_regs->rq_regs_l);
|
||||
dml_print("DML: <CHROMA> \n");
|
||||
dml_print_data_rq_regs_st(&rq_regs->rq_regs_c);
|
||||
dml_print("DML: drq_expansion_mode = 0x%x\n", rq_regs->drq_expansion_mode);
|
||||
dml_print("DML: prq_expansion_mode = 0x%x\n", rq_regs->prq_expansion_mode);
|
||||
dml_print("DML: mrq_expansion_mode = 0x%x\n", rq_regs->mrq_expansion_mode);
|
||||
dml_print("DML: crq_expansion_mode = 0x%x\n", rq_regs->crq_expansion_mode);
|
||||
dml_print("DML: plane1_base_address = 0x%x\n", rq_regs->plane1_base_address);
|
||||
dml_print("DML: ===================================== \n");
|
||||
}
|
||||
|
||||
void dml_print_dlg_regs_st(const dml_display_dlg_regs_st *dlg_regs)
|
||||
{
|
||||
dml_print("DML: ===================================== \n");
|
||||
dml_print("DML: DISPLAY_DLG_REGS_ST \n");
|
||||
dml_print("DML: refcyc_h_blank_end = 0x%x\n", dlg_regs->refcyc_h_blank_end);
|
||||
dml_print("DML: dlg_vblank_end = 0x%x\n", dlg_regs->dlg_vblank_end);
|
||||
dml_print("DML: min_dst_y_next_start = 0x%x\n", dlg_regs->min_dst_y_next_start);
|
||||
dml_print("DML: refcyc_per_htotal = 0x%x\n", dlg_regs->refcyc_per_htotal);
|
||||
dml_print("DML: refcyc_x_after_scaler = 0x%x\n", dlg_regs->refcyc_x_after_scaler);
|
||||
dml_print("DML: dst_y_after_scaler = 0x%x\n", dlg_regs->dst_y_after_scaler);
|
||||
dml_print("DML: dst_y_prefetch = 0x%x\n", dlg_regs->dst_y_prefetch);
|
||||
dml_print("DML: dst_y_per_vm_vblank = 0x%x\n", dlg_regs->dst_y_per_vm_vblank);
|
||||
dml_print("DML: dst_y_per_row_vblank = 0x%x\n", dlg_regs->dst_y_per_row_vblank);
|
||||
dml_print("DML: dst_y_per_vm_flip = 0x%x\n", dlg_regs->dst_y_per_vm_flip);
|
||||
dml_print("DML: dst_y_per_row_flip = 0x%x\n", dlg_regs->dst_y_per_row_flip);
|
||||
dml_print("DML: ref_freq_to_pix_freq = 0x%x\n", dlg_regs->ref_freq_to_pix_freq);
|
||||
dml_print("DML: vratio_prefetch = 0x%x\n", dlg_regs->vratio_prefetch);
|
||||
dml_print("DML: vratio_prefetch_c = 0x%x\n", dlg_regs->vratio_prefetch_c);
|
||||
dml_print("DML: refcyc_per_pte_group_vblank_l = 0x%x\n", dlg_regs->refcyc_per_pte_group_vblank_l);
|
||||
dml_print("DML: refcyc_per_pte_group_vblank_c = 0x%x\n", dlg_regs->refcyc_per_pte_group_vblank_c);
|
||||
dml_print("DML: refcyc_per_meta_chunk_vblank_l = 0x%x\n", dlg_regs->refcyc_per_meta_chunk_vblank_l);
|
||||
dml_print("DML: refcyc_per_meta_chunk_vblank_c = 0x%x\n", dlg_regs->refcyc_per_meta_chunk_vblank_c);
|
||||
dml_print("DML: refcyc_per_pte_group_flip_l = 0x%x\n", dlg_regs->refcyc_per_pte_group_flip_l);
|
||||
dml_print("DML: refcyc_per_pte_group_flip_c = 0x%x\n", dlg_regs->refcyc_per_pte_group_flip_c);
|
||||
dml_print("DML: refcyc_per_meta_chunk_flip_l = 0x%x\n", dlg_regs->refcyc_per_meta_chunk_flip_l);
|
||||
dml_print("DML: refcyc_per_meta_chunk_flip_c = 0x%x\n", dlg_regs->refcyc_per_meta_chunk_flip_c);
|
||||
dml_print("DML: dst_y_per_pte_row_nom_l = 0x%x\n", dlg_regs->dst_y_per_pte_row_nom_l);
|
||||
dml_print("DML: dst_y_per_pte_row_nom_c = 0x%x\n", dlg_regs->dst_y_per_pte_row_nom_c);
|
||||
dml_print("DML: refcyc_per_pte_group_nom_l = 0x%x\n", dlg_regs->refcyc_per_pte_group_nom_l);
|
||||
dml_print("DML: refcyc_per_pte_group_nom_c = 0x%x\n", dlg_regs->refcyc_per_pte_group_nom_c);
|
||||
dml_print("DML: dst_y_per_meta_row_nom_l = 0x%x\n", dlg_regs->dst_y_per_meta_row_nom_l);
|
||||
dml_print("DML: dst_y_per_meta_row_nom_c = 0x%x\n", dlg_regs->dst_y_per_meta_row_nom_c);
|
||||
dml_print("DML: refcyc_per_meta_chunk_nom_l = 0x%x\n", dlg_regs->refcyc_per_meta_chunk_nom_l);
|
||||
dml_print("DML: refcyc_per_meta_chunk_nom_c = 0x%x\n", dlg_regs->refcyc_per_meta_chunk_nom_c);
|
||||
dml_print("DML: refcyc_per_line_delivery_pre_l = 0x%x\n", dlg_regs->refcyc_per_line_delivery_pre_l);
|
||||
dml_print("DML: refcyc_per_line_delivery_pre_c = 0x%x\n", dlg_regs->refcyc_per_line_delivery_pre_c);
|
||||
dml_print("DML: refcyc_per_line_delivery_l = 0x%x\n", dlg_regs->refcyc_per_line_delivery_l);
|
||||
dml_print("DML: refcyc_per_line_delivery_c = 0x%x\n", dlg_regs->refcyc_per_line_delivery_c);
|
||||
dml_print("DML: refcyc_per_vm_group_vblank = 0x%x\n", dlg_regs->refcyc_per_vm_group_vblank);
|
||||
dml_print("DML: refcyc_per_vm_group_flip = 0x%x\n", dlg_regs->refcyc_per_vm_group_flip);
|
||||
dml_print("DML: refcyc_per_vm_req_vblank = 0x%x\n", dlg_regs->refcyc_per_vm_req_vblank);
|
||||
dml_print("DML: refcyc_per_vm_req_flip = 0x%x\n", dlg_regs->refcyc_per_vm_req_flip);
|
||||
dml_print("DML: chunk_hdl_adjust_cur0 = 0x%x\n", dlg_regs->chunk_hdl_adjust_cur0);
|
||||
dml_print("DML: dst_y_offset_cur1 = 0x%x\n", dlg_regs->dst_y_offset_cur1);
|
||||
dml_print("DML: chunk_hdl_adjust_cur1 = 0x%x\n", dlg_regs->chunk_hdl_adjust_cur1);
|
||||
dml_print("DML: vready_after_vcount0 = 0x%x\n", dlg_regs->vready_after_vcount0);
|
||||
dml_print("DML: dst_y_delta_drq_limit = 0x%x\n", dlg_regs->dst_y_delta_drq_limit);
|
||||
dml_print("DML: refcyc_per_vm_dmdata = 0x%x\n", dlg_regs->refcyc_per_vm_dmdata);
|
||||
dml_print("DML: ===================================== \n");
|
||||
}
|
||||
|
||||
void dml_print_ttu_regs_st(const dml_display_ttu_regs_st *ttu_regs)
|
||||
{
|
||||
dml_print("DML: ===================================== \n");
|
||||
dml_print("DML: DISPLAY_TTU_REGS_ST \n");
|
||||
dml_print("DML: qos_level_low_wm = 0x%x\n", ttu_regs->qos_level_low_wm);
|
||||
dml_print("DML: qos_level_high_wm = 0x%x\n", ttu_regs->qos_level_high_wm);
|
||||
dml_print("DML: min_ttu_vblank = 0x%x\n", ttu_regs->min_ttu_vblank);
|
||||
dml_print("DML: qos_level_flip = 0x%x\n", ttu_regs->qos_level_flip);
|
||||
dml_print("DML: refcyc_per_req_delivery_pre_l = 0x%x\n", ttu_regs->refcyc_per_req_delivery_pre_l);
|
||||
dml_print("DML: refcyc_per_req_delivery_l = 0x%x\n", ttu_regs->refcyc_per_req_delivery_l);
|
||||
dml_print("DML: refcyc_per_req_delivery_pre_c = 0x%x\n", ttu_regs->refcyc_per_req_delivery_pre_c);
|
||||
dml_print("DML: refcyc_per_req_delivery_c = 0x%x\n", ttu_regs->refcyc_per_req_delivery_c);
|
||||
dml_print("DML: refcyc_per_req_delivery_cur0 = 0x%x\n", ttu_regs->refcyc_per_req_delivery_cur0);
|
||||
dml_print("DML: refcyc_per_req_delivery_pre_cur0 = 0x%x\n", ttu_regs->refcyc_per_req_delivery_pre_cur0);
|
||||
dml_print("DML: refcyc_per_req_delivery_cur1 = 0x%x\n", ttu_regs->refcyc_per_req_delivery_cur1);
|
||||
dml_print("DML: refcyc_per_req_delivery_pre_cur1 = 0x%x\n", ttu_regs->refcyc_per_req_delivery_pre_cur1);
|
||||
dml_print("DML: qos_level_fixed_l = 0x%x\n", ttu_regs->qos_level_fixed_l);
|
||||
dml_print("DML: qos_ramp_disable_l = 0x%x\n", ttu_regs->qos_ramp_disable_l);
|
||||
dml_print("DML: qos_level_fixed_c = 0x%x\n", ttu_regs->qos_level_fixed_c);
|
||||
dml_print("DML: qos_ramp_disable_c = 0x%x\n", ttu_regs->qos_ramp_disable_c);
|
||||
dml_print("DML: qos_level_fixed_cur0 = 0x%x\n", ttu_regs->qos_level_fixed_cur0);
|
||||
dml_print("DML: qos_ramp_disable_cur0 = 0x%x\n", ttu_regs->qos_ramp_disable_cur0);
|
||||
dml_print("DML: qos_level_fixed_cur1 = 0x%x\n", ttu_regs->qos_level_fixed_cur1);
|
||||
dml_print("DML: qos_ramp_disable_cur1 = 0x%x\n", ttu_regs->qos_ramp_disable_cur1);
|
||||
dml_print("DML: ===================================== \n");
|
||||
}
|
||||
|
||||
void dml_print_dml_policy(const struct dml_mode_eval_policy_st *policy)
|
||||
{
|
||||
dml_print("DML: ===================================== \n");
|
||||
dml_print("DML: DML_MODE_EVAL_POLICY_ST\n");
|
||||
dml_print("DML: Policy: UseUnboundedRequesting = 0x%x\n", policy->UseUnboundedRequesting);
|
||||
dml_print("DML: Policy: UseMinimumRequiredDCFCLK = 0x%x\n", policy->UseMinimumRequiredDCFCLK);
|
||||
dml_print("DML: Policy: DRAMClockChangeRequirementFinal = 0x%x\n", policy->DRAMClockChangeRequirementFinal);
|
||||
dml_print("DML: Policy: FCLKChangeRequirementFinal = 0x%x\n", policy->FCLKChangeRequirementFinal);
|
||||
dml_print("DML: Policy: USRRetrainingRequiredFinal = 0x%x\n", policy->USRRetrainingRequiredFinal);
|
||||
dml_print("DML: Policy: EnhancedPrefetchScheduleAccelerationFinal = 0x%x\n", policy->EnhancedPrefetchScheduleAccelerationFinal);
|
||||
dml_print("DML: Policy: NomDETInKByteOverrideEnable = 0x%x\n", policy->NomDETInKByteOverrideEnable);
|
||||
dml_print("DML: Policy: NomDETInKByteOverrideValue = 0x%x\n", policy->NomDETInKByteOverrideValue);
|
||||
dml_print("DML: Policy: DCCProgrammingAssumesScanDirectionUnknownFinal = 0x%x\n", policy->DCCProgrammingAssumesScanDirectionUnknownFinal);
|
||||
dml_print("DML: Policy: SynchronizeTimingsFinal = 0x%x\n", policy->SynchronizeTimingsFinal);
|
||||
dml_print("DML: Policy: SynchronizeDRRDisplaysForUCLKPStateChangeFinal = 0x%x\n", policy->SynchronizeDRRDisplaysForUCLKPStateChangeFinal);
|
||||
dml_print("DML: Policy: AssumeModeSupportAtMaxPwrStateEvenDRAMClockChangeNotSupported = 0x%x\n", policy->AssumeModeSupportAtMaxPwrStateEvenDRAMClockChangeNotSupported);
|
||||
dml_print("DML: Policy: AssumeModeSupportAtMaxPwrStateEvenFClockChangeNotSupported = 0x%x\n", policy->AssumeModeSupportAtMaxPwrStateEvenFClockChangeNotSupported);
|
||||
|
||||
for (dml_uint_t i = 0; i < DCN_DML__NUM_PLANE; i++) {
|
||||
dml_print("DML: i=%0d, Policy: MPCCombineUse = 0x%x\n", i, policy->MPCCombineUse[i]);
|
||||
dml_print("DML: i=%0d, Policy: ODMUse = 0x%x\n", i, policy->ODMUse[i]);
|
||||
dml_print("DML: i=%0d, Policy: ImmediateFlipRequirement = 0x%x\n", i, policy->ImmediateFlipRequirement[i]);
|
||||
dml_print("DML: i=%0d, Policy: AllowForPStateChangeOrStutterInVBlank = 0x%x\n", i, policy->AllowForPStateChangeOrStutterInVBlank[i]);
|
||||
}
|
||||
dml_print("DML: ===================================== \n");
|
||||
}
|
||||
|
||||
void dml_print_mode_support(struct display_mode_lib_st *mode_lib, dml_uint_t j)
|
||||
{
|
||||
dml_print("DML: MODE SUPPORT: ===============================================\n");
|
||||
dml_print("DML: MODE SUPPORT: Voltage State %d\n", j);
|
||||
dml_print("DML: MODE SUPPORT: Mode Supported : %s\n", mode_lib->ms.support.ModeSupport[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Scale Ratio And Taps : %s\n", mode_lib->ms.support.ScaleRatioAndTapsSupport == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Source Format Pixel And Scan : %s\n", mode_lib->ms.support.SourceFormatPixelAndScanSupport == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Viewport Size : %s\n", mode_lib->ms.support.ViewportSizeSupport[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Link Rate Does Not Match DP Version : %s\n", mode_lib->ms.support.LinkRateDoesNotMatchDPVersion == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Link Rate For Multistream Not Indicated : %s\n", mode_lib->ms.support.LinkRateForMultistreamNotIndicated == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: BPP For Multi stream Not Indicated : %s\n", mode_lib->ms.support.BPPForMultistreamNotIndicated == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Multistream With HDMI Or eDP : %s\n", mode_lib->ms.support.MultistreamWithHDMIOreDP == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Exceeded Multistream Slots : %s\n", mode_lib->ms.support.ExceededMultistreamSlots == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: MSO Or ODM Split With Non DP Link : %s\n", mode_lib->ms.support.MSOOrODMSplitWithNonDPLink == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Not Enough Lanes For MSO : %s\n", mode_lib->ms.support.NotEnoughLanesForMSO == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: LinkCapacitySupport : %s\n", mode_lib->ms.support.LinkCapacitySupport == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: P2IWith420 : %s\n", mode_lib->ms.support.P2IWith420 == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: DSCOnlyIfNecessaryWithBPP : %s\n", mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: DSC422NativeNotSupported : %s\n", mode_lib->ms.support.DSC422NativeNotSupported == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: MPCCombineMethodIncompatible : %s\n", mode_lib->ms.support.MPCCombineMethodIncompatible == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: ODMCombineTwoToOneSupportCheckOK : %s\n", mode_lib->ms.support.ODMCombineTwoToOneSupportCheckOK == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: ODMCombineFourToOneSupportCheckOK : %s\n", mode_lib->ms.support.ODMCombineFourToOneSupportCheckOK == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: NotEnoughDSCUnits : %s\n", mode_lib->ms.support.NotEnoughDSCUnits == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: NotEnoughDSCSlices : %s\n", mode_lib->ms.support.NotEnoughDSCSlices == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe : %s\n", mode_lib->ms.support.ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: InvalidCombinationOfMALLUseForPStateAndStaticScreen : %s\n", mode_lib->ms.support.InvalidCombinationOfMALLUseForPStateAndStaticScreen == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: DSCCLKRequiredMoreThanSupported : %s\n", mode_lib->ms.support.DSCCLKRequiredMoreThanSupported == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: PixelsPerLinePerDSCUnitSupport : %s\n", mode_lib->ms.support.PixelsPerLinePerDSCUnitSupport == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: DTBCLKRequiredMoreThanSupported : %s\n", mode_lib->ms.support.DTBCLKRequiredMoreThanSupported == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: InvalidCombinationOfMALLUseForPState : %s\n", mode_lib->ms.support.InvalidCombinationOfMALLUseForPState == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified : %s\n", mode_lib->ms.support.ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: ROB Support : %s\n", mode_lib->ms.support.ROBSupport[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: DISPCLK DPPCLK Support : %s\n", mode_lib->ms.support.DISPCLK_DPPCLK_Support[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Total Available Pipes Support : %s\n", mode_lib->ms.support.TotalAvailablePipesSupport[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Number Of OTG Support : %s\n", mode_lib->ms.support.NumberOfOTGSupport == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Number Of DP2p0 Support : %s\n", mode_lib->ms.support.NumberOfDP2p0Support == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Writeback Latency Support : %s\n", mode_lib->ms.support.WritebackLatencySupport == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Writeback Scale Ratio And Taps Support : %s\n", mode_lib->ms.support.WritebackScaleRatioAndTapsSupport == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Cursor Support : %s\n", mode_lib->ms.support.CursorSupport == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Pitch Support : %s\n", mode_lib->ms.support.PitchSupport == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Viewport Exceeds Surface : %s\n", mode_lib->ms.support.ViewportExceedsSurface == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Prefetch Supported : %s\n", mode_lib->ms.support.PrefetchSupported[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: VActive Bandwith Support : %s\n", mode_lib->ms.support.VActiveBandwithSupport[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Dynamic Metadata Supported : %s\n", mode_lib->ms.support.DynamicMetadataSupported[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Total Vertical Active Bandwidth Support : %s\n", mode_lib->ms.support.TotalVerticalActiveBandwidthSupport[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: VRatio In Prefetch Supported : %s\n", mode_lib->ms.support.VRatioInPrefetchSupported[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: PTE Buffer Size Not Exceeded : %s\n", mode_lib->ms.support.PTEBufferSizeNotExceeded[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: DCC Meta Buffer Size Not Exceeded : %s\n", mode_lib->ms.support.DCCMetaBufferSizeNotExceeded[j] == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Non supported DSC Input BPC : %s\n", mode_lib->ms.support.NonsupportedDSCInputBPC == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Exceeded MALL Size : %s\n", mode_lib->ms.support.ExceededMALLSize == false ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: Host VM or Immediate Flip Supported : %s\n", ((mode_lib->ms.cache_display_cfg.plane.HostVMEnable == false && !mode_lib->scratch.dml_core_mode_support_locals.ImmediateFlipRequiredFinal) || mode_lib->ms.support.ImmediateFlipSupportedForState[j]) ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: dram clock change support : %s\n", mode_lib->scratch.dml_core_mode_support_locals.dram_clock_change_support == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: f_clock change support : %s\n", mode_lib->scratch.dml_core_mode_support_locals.f_clock_change_support == true ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: USR Retraining Support : %s\n", (!mode_lib->ms.policy.USRRetrainingRequiredFinal || &mode_lib->ms.support.USRRetrainingSupport[j]) ? "Supported" : "NOT Supported");
|
||||
dml_print("DML: MODE SUPPORT: ===============================================\n");
|
||||
}
|
||||
|
||||
void dml_print_dml_mode_support_info(const struct dml_mode_support_info_st *support, dml_bool_t fail_only)
|
||||
{
|
||||
dml_print("DML: ===================================== \n");
|
||||
dml_print("DML: DML_MODE_SUPPORT_INFO_ST\n");
|
||||
if (!fail_only || support->ModeIsSupported == 0)
|
||||
dml_print("DML: support: ModeIsSupported = 0x%x\n", support->ModeIsSupported);
|
||||
if (!fail_only || support->ImmediateFlipSupport == 0)
|
||||
dml_print("DML: support: ImmediateFlipSupport = 0x%x\n", support->ImmediateFlipSupport);
|
||||
if (!fail_only || support->WritebackLatencySupport == 0)
|
||||
dml_print("DML: support: WritebackLatencySupport = 0x%x\n", support->WritebackLatencySupport);
|
||||
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
|
||||
dml_print("DML: support: ScaleRatioAndTapsSupport = 0x%x\n", support->ScaleRatioAndTapsSupport);
|
||||
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
|
||||
dml_print("DML: support: SourceFormatPixelAndScanSupport = 0x%x\n", support->SourceFormatPixelAndScanSupport);
|
||||
if (!fail_only || support->MPCCombineMethodIncompatible == 1)
|
||||
dml_print("DML: support: MPCCombineMethodIncompatible = 0x%x\n", support->MPCCombineMethodIncompatible);
|
||||
if (!fail_only || support->P2IWith420 == 1)
|
||||
dml_print("DML: support: P2IWith420 = 0x%x\n", support->P2IWith420);
|
||||
if (!fail_only || support->DSCOnlyIfNecessaryWithBPP == 1)
|
||||
dml_print("DML: support: DSCOnlyIfNecessaryWithBPP = 0x%x\n", support->DSCOnlyIfNecessaryWithBPP);
|
||||
if (!fail_only || support->DSC422NativeNotSupported == 1)
|
||||
dml_print("DML: support: DSC422NativeNotSupported = 0x%x\n", support->DSC422NativeNotSupported);
|
||||
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
|
||||
dml_print("DML: support: LinkRateDoesNotMatchDPVersion = 0x%x\n", support->LinkRateDoesNotMatchDPVersion);
|
||||
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
|
||||
dml_print("DML: support: LinkRateForMultistreamNotIndicated = 0x%x\n", support->LinkRateForMultistreamNotIndicated);
|
||||
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
|
||||
dml_print("DML: support: BPPForMultistreamNotIndicated = 0x%x\n", support->BPPForMultistreamNotIndicated);
|
||||
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
|
||||
dml_print("DML: support: MultistreamWithHDMIOreDP = 0x%x\n", support->MultistreamWithHDMIOreDP);
|
||||
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
|
||||
dml_print("DML: support: MSOOrODMSplitWithNonDPLink = 0x%x\n", support->MSOOrODMSplitWithNonDPLink);
|
||||
if (!fail_only || support->NotEnoughLanesForMSO == 1)
|
||||
dml_print("DML: support: NotEnoughLanesForMSO = 0x%x\n", support->NotEnoughLanesForMSO);
|
||||
if (!fail_only || support->NumberOfOTGSupport == 0)
|
||||
dml_print("DML: support: NumberOfOTGSupport = 0x%x\n", support->NumberOfOTGSupport);
|
||||
if (!fail_only || support->NumberOfDP2p0Support == 0)
|
||||
dml_print("DML: support: NumberOfDP2p0Support = 0x%x\n", support->NumberOfDP2p0Support);
|
||||
if (!fail_only || support->NonsupportedDSCInputBPC == 1)
|
||||
dml_print("DML: support: NonsupportedDSCInputBPC = 0x%x\n", support->NonsupportedDSCInputBPC);
|
||||
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
|
||||
dml_print("DML: support: WritebackScaleRatioAndTapsSupport = 0x%x\n", support->WritebackScaleRatioAndTapsSupport);
|
||||
if (!fail_only || support->CursorSupport == 0)
|
||||
dml_print("DML: support: CursorSupport = 0x%x\n", support->CursorSupport);
|
||||
if (!fail_only || support->PitchSupport == 0)
|
||||
dml_print("DML: support: PitchSupport = 0x%x\n", support->PitchSupport);
|
||||
if (!fail_only || support->ViewportExceedsSurface == 1)
|
||||
dml_print("DML: support: ViewportExceedsSurface = 0x%x\n", support->ViewportExceedsSurface);
|
||||
if (!fail_only || support->ExceededMALLSize == 1)
|
||||
dml_print("DML: support: ExceededMALLSize = 0x%x\n", support->ExceededMALLSize);
|
||||
if (!fail_only || support->EnoughWritebackUnits == 0)
|
||||
dml_print("DML: support: EnoughWritebackUnits = 0x%x\n", support->EnoughWritebackUnits);
|
||||
if (!fail_only || support->ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified == 1)
|
||||
dml_print("DML: support: ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified = 0x%x\n", support->ImmediateFlipRequiredButTheRequirementForEachSurfaceIsNotSpecified);
|
||||
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
|
||||
dml_print("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = 0x%x\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
|
||||
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
|
||||
dml_print("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = 0x%x\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
|
||||
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
|
||||
dml_print("DML: support: InvalidCombinationOfMALLUseForPState = 0x%x\n", support->InvalidCombinationOfMALLUseForPState);
|
||||
|
||||
if (!fail_only || support->ExceededMultistreamSlots == 1)
|
||||
dml_print("DML: support: ExceededMultistreamSlots = 0x%x\n", support->ExceededMultistreamSlots);
|
||||
if (!fail_only || support->ODMCombineTwoToOneSupportCheckOK == 0)
|
||||
dml_print("DML: support: ODMCombineTwoToOneSupportCheckOK = 0x%x\n", support->ODMCombineTwoToOneSupportCheckOK);
|
||||
if (!fail_only || support->ODMCombineFourToOneSupportCheckOK == 0)
|
||||
dml_print("DML: support: ODMCombineFourToOneSupportCheckOK = 0x%x\n", support->ODMCombineFourToOneSupportCheckOK);
|
||||
if (!fail_only || support->NotEnoughDSCUnits == 1)
|
||||
dml_print("DML: support: NotEnoughDSCUnits = 0x%x\n", support->NotEnoughDSCUnits);
|
||||
if (!fail_only || support->NotEnoughDSCSlices == 1)
|
||||
dml_print("DML: support: NotEnoughDSCSlices = 0x%x\n", support->NotEnoughDSCSlices);
|
||||
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
|
||||
dml_print("DML: support: PixelsPerLinePerDSCUnitSupport = 0x%x\n", support->PixelsPerLinePerDSCUnitSupport);
|
||||
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
|
||||
dml_print("DML: support: DSCCLKRequiredMoreThanSupported = 0x%x\n", support->DSCCLKRequiredMoreThanSupported);
|
||||
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
|
||||
dml_print("DML: support: DTBCLKRequiredMoreThanSupported = 0x%x\n", support->DTBCLKRequiredMoreThanSupported);
|
||||
if (!fail_only || support->LinkCapacitySupport == 0)
|
||||
dml_print("DML: support: LinkCapacitySupport = 0x%x\n", support->LinkCapacitySupport);
|
||||
|
||||
for (dml_uint_t j = 0; j < 2; j++) {
|
||||
if (!fail_only || support->DRAMClockChangeSupport[j] == dml_dram_clock_change_unsupported)
|
||||
dml_print("DML: support: combine=%d, DRAMClockChangeSupport = %d\n", j, support->DRAMClockChangeSupport[j]);
|
||||
if (!fail_only || support->FCLKChangeSupport[j] == dml_fclock_change_unsupported)
|
||||
dml_print("DML: support: combine=%d, FCLKChangeSupport = %d\n", j, support->FCLKChangeSupport[j]);
|
||||
if (!fail_only || support->ROBSupport[j] == 0)
|
||||
dml_print("DML: support: combine=%d, ROBSupport = %d\n", j, support->ROBSupport[j]);
|
||||
if (!fail_only || support->PTEBufferSizeNotExceeded[j] == 0)
|
||||
dml_print("DML: support: combine=%d, PTEBufferSizeNotExceeded = %d\n", j, support->PTEBufferSizeNotExceeded[j]);
|
||||
if (!fail_only || support->DCCMetaBufferSizeNotExceeded[j] == 0)
|
||||
dml_print("DML: support: combine=%d, DCCMetaBufferSizeNotExceeded = %d\n", j, support->DCCMetaBufferSizeNotExceeded[j]);
|
||||
if (!fail_only || support->TotalVerticalActiveBandwidthSupport[j] == 0)
|
||||
dml_print("DML: support: combine=%d, TotalVerticalActiveBandwidthSupport = %d\n", j, support->TotalVerticalActiveBandwidthSupport[j]);
|
||||
if (!fail_only || support->USRRetrainingSupport[j] == 0)
|
||||
dml_print("DML: support: combine=%d, USRRetrainingSupport = %d\n", j, support->USRRetrainingSupport[j]);
|
||||
if (!fail_only || support->VActiveBandwithSupport[j] == 0)
|
||||
dml_print("DML: support: combine=%d, VActiveBandwithSupport = %d\n", j, support->VActiveBandwithSupport[j]);
|
||||
if (!fail_only || support->PrefetchSupported[j] == 0)
|
||||
dml_print("DML: support: combine=%d, PrefetchSupported = %d\n", j, support->PrefetchSupported[j]);
|
||||
if (!fail_only || support->DynamicMetadataSupported[j] == 0)
|
||||
dml_print("DML: support: combine=%d, DynamicMetadataSupported = %d\n", j, support->DynamicMetadataSupported[j]);
|
||||
if (!fail_only || support->VRatioInPrefetchSupported[j] == 0)
|
||||
dml_print("DML: support: combine=%d, VRatioInPrefetchSupported = %d\n", j, support->VRatioInPrefetchSupported[j]);
|
||||
if (!fail_only || support->DISPCLK_DPPCLK_Support[j] == 0)
|
||||
dml_print("DML: support: combine=%d, DISPCLK_DPPCLK_Support = %d\n", j, support->DISPCLK_DPPCLK_Support[j]);
|
||||
if (!fail_only || support->TotalAvailablePipesSupport[j] == 0)
|
||||
dml_print("DML: support: combine=%d, TotalAvailablePipesSupport = %d\n", j, support->TotalAvailablePipesSupport[j]);
|
||||
if (!fail_only || support->ModeSupport[j] == 0)
|
||||
dml_print("DML: support: combine=%d, ModeSupport = %d\n", j, support->ModeSupport[j]);
|
||||
if (!fail_only || support->ViewportSizeSupport[j] == 0)
|
||||
dml_print("DML: support: combine=%d, ViewportSizeSupport = %d\n", j, support->ViewportSizeSupport[j]);
|
||||
if (!fail_only || support->ImmediateFlipSupportedForState[j] == 0)
|
||||
dml_print("DML: support: combine=%d, ImmediateFlipSupportedForState = %d\n", j, support->ImmediateFlipSupportedForState[j]);
|
||||
}
|
||||
}
|
||||
|
||||
void dml_print_dml_display_cfg_timing(const struct dml_timing_cfg_st *timing, dml_uint_t num_plane)
|
||||
{
|
||||
for (dml_uint_t i = 0; i < num_plane; i++) {
|
||||
dml_print("DML: timing_cfg: plane=%d, HTotal = %d\n", i, timing->HTotal[i]);
|
||||
dml_print("DML: timing_cfg: plane=%d, VTotal = %d\n", i, timing->VTotal[i]);
|
||||
dml_print("DML: timing_cfg: plane=%d, HActive = %d\n", i, timing->HActive[i]);
|
||||
dml_print("DML: timing_cfg: plane=%d, VActive = %d\n", i, timing->VActive[i]);
|
||||
dml_print("DML: timing_cfg: plane=%d, VFrontPorch = %d\n", i, timing->VFrontPorch[i]);
|
||||
dml_print("DML: timing_cfg: plane=%d, VBlankNom = %d\n", i, timing->VBlankNom[i]);
|
||||
dml_print("DML: timing_cfg: plane=%d, RefreshRate = %d\n", i, timing->RefreshRate[i]);
|
||||
dml_print("DML: timing_cfg: plane=%d, PixelClock = %f\n", i, timing->PixelClock[i]);
|
||||
dml_print("DML: timing_cfg: plane=%d, Interlace = %d\n", i, timing->Interlace[i]);
|
||||
dml_print("DML: timing_cfg: plane=%d, DRRDisplay = %d\n", i, timing->DRRDisplay[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void dml_print_dml_display_cfg_plane(const struct dml_plane_cfg_st *plane, dml_uint_t num_plane)
|
||||
{
|
||||
dml_print("DML: plane_cfg: num_plane = %d\n", num_plane);
|
||||
dml_print("DML: plane_cfg: GPUVMEnable = %d\n", plane->GPUVMEnable);
|
||||
dml_print("DML: plane_cfg: HostVMEnable = %d\n", plane->HostVMEnable);
|
||||
dml_print("DML: plane_cfg: GPUVMMaxPageTableLevels = %d\n", plane->GPUVMMaxPageTableLevels);
|
||||
dml_print("DML: plane_cfg: HostVMMaxPageTableLevels = %d\n", plane->HostVMMaxPageTableLevels);
|
||||
|
||||
for (dml_uint_t i = 0; i < num_plane; i++) {
|
||||
dml_print("DML: plane_cfg: plane=%d, GPUVMMinPageSizeKBytes = %d\n", i, plane->GPUVMMinPageSizeKBytes[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ForceOneRowForFrame = %d\n", i, plane->ForceOneRowForFrame[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, PTEBufferModeOverrideEn = %d\n", i, plane->PTEBufferModeOverrideEn[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, PTEBufferMode = %d\n", i, plane->PTEBufferMode[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, DETSizeOverride = %d\n", i, plane->DETSizeOverride[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, UseMALLForStaticScreen = %d\n", i, plane->UseMALLForStaticScreen[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, UseMALLForPStateChange = %d\n", i, plane->UseMALLForPStateChange[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, BlendingAndTiming = %d\n", i, plane->BlendingAndTiming[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ViewportWidth = %d\n", i, plane->ViewportWidth[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ViewportHeight = %d\n", i, plane->ViewportHeight[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ViewportWidthChroma = %d\n", i, plane->ViewportWidthChroma[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ViewportHeightChroma = %d\n", i, plane->ViewportHeightChroma[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ViewportXStart = %d\n", i, plane->ViewportXStart[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ViewportXStartC = %d\n", i, plane->ViewportXStartC[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ViewportYStart = %d\n", i, plane->ViewportYStart[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ViewportYStartC = %d\n", i, plane->ViewportYStartC[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ViewportStationary = %d\n", i, plane->ViewportStationary[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ScalerEnabled = %d\n", i, plane->ScalerEnabled[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, HRatio = %3.2f\n", i, plane->HRatio[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, VRatio = %3.2f\n", i, plane->VRatio[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, HRatioChroma = %3.2f\n", i, plane->HRatioChroma[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, VRatioChroma = %3.2f\n", i, plane->VRatioChroma[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, HTaps = %d\n", i, plane->HTaps[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, VTaps = %d\n", i, plane->VTaps[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, HTapsChroma = %d\n", i, plane->HTapsChroma[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, VTapsChroma = %d\n", i, plane->VTapsChroma[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, LBBitPerPixel = %d\n", i, plane->LBBitPerPixel[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, SourceScan = %d\n", i, plane->SourceScan[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, ScalerRecoutWidth = %d\n", i, plane->ScalerRecoutWidth[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, NumberOfCursors = %d\n", i, plane->NumberOfCursors[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, CursorWidth = %d\n", i, plane->CursorWidth[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, CursorBPP = %d\n", i, plane->CursorBPP[i]);
|
||||
|
||||
dml_print("DML: plane_cfg: plane=%d, DynamicMetadataEnable = %d\n", i, plane->DynamicMetadataEnable[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, DynamicMetadataLinesBeforeActiveRequired = %d\n", i, plane->DynamicMetadataLinesBeforeActiveRequired[i]);
|
||||
dml_print("DML: plane_cfg: plane=%d, DynamicMetadataTransmittedBytes = %d\n", i, plane->DynamicMetadataTransmittedBytes[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void dml_print_dml_display_cfg_surface(const struct dml_surface_cfg_st *surface, dml_uint_t num_plane)
|
||||
{
|
||||
for (dml_uint_t i = 0; i < num_plane; i++) {
|
||||
dml_print("DML: surface_cfg: plane=%d, PitchY = %d\n", i, surface->PitchY[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, SurfaceWidthY = %d\n", i, surface->SurfaceWidthY[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, SurfaceHeightY = %d\n", i, surface->SurfaceHeightY[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, PitchC = %d\n", i, surface->PitchC[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, SurfaceWidthC = %d\n", i, surface->SurfaceWidthC[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, SurfaceHeightC = %d\n", i, surface->SurfaceHeightC[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, DCCEnable = %d\n", i, surface->DCCEnable[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, DCCMetaPitchY = %d\n", i, surface->DCCMetaPitchY[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, DCCMetaPitchC = %d\n", i, surface->DCCMetaPitchC[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, DCCRateLuma = %f\n", i, surface->DCCRateLuma[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, DCCRateChroma = %f\n", i, surface->DCCRateChroma[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, DCCFractionOfZeroSizeRequestsLuma = %f\n", i, surface->DCCFractionOfZeroSizeRequestsLuma[i]);
|
||||
dml_print("DML: surface_cfg: plane=%d, DCCFractionOfZeroSizeRequestsChroma= %f\n", i, surface->DCCFractionOfZeroSizeRequestsChroma[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void dml_print_dml_display_cfg_hw_resource(const struct dml_hw_resource_st *hw, dml_uint_t num_plane)
|
||||
{
|
||||
for (dml_uint_t i = 0; i < num_plane; i++) {
|
||||
dml_print("DML: hw_resource: plane=%d, ODMMode = %d\n", i, hw->ODMMode[i]);
|
||||
dml_print("DML: hw_resource: plane=%d, DPPPerSurface = %d\n", i, hw->DPPPerSurface[i]);
|
||||
dml_print("DML: hw_resource: plane=%d, DSCEnabled = %d\n", i, hw->DSCEnabled[i]);
|
||||
dml_print("DML: hw_resource: plane=%d, NumberOfDSCSlices = %d\n", i, hw->NumberOfDSCSlices[i]);
|
||||
}
|
||||
dml_print("DML: hw_resource: DLGRefClkFreqMHz = %f\n", hw->DLGRefClkFreqMHz);
|
||||
}
|
||||
|
||||
__DML_DLL_EXPORT__ void dml_print_soc_state_bounding_box(const struct soc_state_bounding_box_st *state)
|
||||
{
|
||||
dml_print("DML: state_bbox: socclk_mhz = %f\n", state->socclk_mhz);
|
||||
dml_print("DML: state_bbox: dscclk_mhz = %f\n", state->dscclk_mhz);
|
||||
dml_print("DML: state_bbox: phyclk_mhz = %f\n", state->phyclk_mhz);
|
||||
dml_print("DML: state_bbox: phyclk_d18_mhz = %f\n", state->phyclk_d18_mhz);
|
||||
dml_print("DML: state_bbox: phyclk_d32_mhz = %f\n", state->phyclk_d32_mhz);
|
||||
dml_print("DML: state_bbox: dtbclk_mhz = %f\n", state->dtbclk_mhz);
|
||||
dml_print("DML: state_bbox: dispclk_mhz = %f\n", state->dispclk_mhz);
|
||||
dml_print("DML: state_bbox: dppclk_mhz = %f\n", state->dppclk_mhz);
|
||||
dml_print("DML: state_bbox: fabricclk_mhz = %f\n", state->fabricclk_mhz);
|
||||
dml_print("DML: state_bbox: dcfclk_mhz = %f\n", state->dcfclk_mhz);
|
||||
dml_print("DML: state_bbox: dram_speed_mts = %f\n", state->dram_speed_mts);
|
||||
dml_print("DML: state_bbox: urgent_latency_pixel_data_only_us = %f\n", state->urgent_latency_pixel_data_only_us);
|
||||
dml_print("DML: state_bbox: urgent_latency_pixel_mixed_with_vm_data_us = %f\n", state->urgent_latency_pixel_mixed_with_vm_data_us);
|
||||
dml_print("DML: state_bbox: urgent_latency_vm_data_only_us = %f\n", state->urgent_latency_vm_data_only_us);
|
||||
dml_print("DML: state_bbox: writeback_latency_us = %f\n", state->writeback_latency_us);
|
||||
dml_print("DML: state_bbox: urgent_latency_adjustment_fabric_clock_component_us = %f\n", state->urgent_latency_adjustment_fabric_clock_component_us);
|
||||
dml_print("DML: state_bbox: urgent_latency_adjustment_fabric_clock_reference_mhz= %f\n", state->urgent_latency_adjustment_fabric_clock_reference_mhz);
|
||||
dml_print("DML: state_bbox: sr_exit_time_us = %f\n", state->sr_exit_time_us);
|
||||
dml_print("DML: state_bbox: sr_enter_plus_exit_time_us = %f\n", state->sr_enter_plus_exit_time_us);
|
||||
dml_print("DML: state_bbox: sr_exit_z8_time_us = %f\n", state->sr_exit_z8_time_us);
|
||||
dml_print("DML: state_bbox: sr_enter_plus_exit_z8_time_us = %f\n", state->sr_enter_plus_exit_z8_time_us);
|
||||
dml_print("DML: state_bbox: dram_clock_change_latency_us = %f\n", state->dram_clock_change_latency_us);
|
||||
dml_print("DML: state_bbox: fclk_change_latency_us = %f\n", state->fclk_change_latency_us);
|
||||
dml_print("DML: state_bbox: usr_retraining_latency_us = %f\n", state->usr_retraining_latency_us);
|
||||
dml_print("DML: state_bbox: use_ideal_dram_bw_strobe = %d\n", state->use_ideal_dram_bw_strobe);
|
||||
}
|
||||
|
||||
__DML_DLL_EXPORT__ void dml_print_soc_bounding_box(const struct soc_bounding_box_st *soc)
|
||||
{
|
||||
dml_print("DML: soc_bbox: dprefclk_mhz = %f\n", soc->dprefclk_mhz);
|
||||
dml_print("DML: soc_bbox: xtalclk_mhz = %f\n", soc->xtalclk_mhz);
|
||||
dml_print("DML: soc_bbox: pcierefclk_mhz = %f\n", soc->pcierefclk_mhz);
|
||||
dml_print("DML: soc_bbox: refclk_mhz = %f\n", soc->refclk_mhz);
|
||||
dml_print("DML: soc_bbox: amclk_mhz = %f\n", soc->amclk_mhz);
|
||||
|
||||
dml_print("DML: soc_bbox: max_outstanding_reqs = %f\n", soc->max_outstanding_reqs);
|
||||
dml_print("DML: soc_bbox: pct_ideal_sdp_bw_after_urgent = %f\n", soc->pct_ideal_sdp_bw_after_urgent);
|
||||
dml_print("DML: soc_bbox: pct_ideal_fabric_bw_after_urgent = %f\n", soc->pct_ideal_fabric_bw_after_urgent);
|
||||
dml_print("DML: soc_bbox: pct_ideal_dram_bw_after_urgent_pixel_only = %f\n", soc->pct_ideal_dram_bw_after_urgent_pixel_only);
|
||||
dml_print("DML: soc_bbox: pct_ideal_dram_bw_after_urgent_pixel_and_vm = %f\n", soc->pct_ideal_dram_bw_after_urgent_pixel_and_vm);
|
||||
dml_print("DML: soc_bbox: pct_ideal_dram_bw_after_urgent_vm_only = %f\n", soc->pct_ideal_dram_bw_after_urgent_vm_only);
|
||||
dml_print("DML: soc_bbox: pct_ideal_dram_bw_after_urgent_strobe = %f\n", soc->pct_ideal_dram_bw_after_urgent_strobe);
|
||||
dml_print("DML: soc_bbox: max_avg_sdp_bw_use_normal_percent = %f\n", soc->max_avg_sdp_bw_use_normal_percent);
|
||||
dml_print("DML: soc_bbox: max_avg_fabric_bw_use_normal_percent = %f\n", soc->max_avg_fabric_bw_use_normal_percent);
|
||||
dml_print("DML: soc_bbox: max_avg_dram_bw_use_normal_percent = %f\n", soc->max_avg_dram_bw_use_normal_percent);
|
||||
dml_print("DML: soc_bbox: max_avg_dram_bw_use_normal_strobe_percent = %f\n", soc->max_avg_dram_bw_use_normal_strobe_percent);
|
||||
dml_print("DML: soc_bbox: round_trip_ping_latency_dcfclk_cycles = %d\n", soc->round_trip_ping_latency_dcfclk_cycles);
|
||||
dml_print("DML: soc_bbox: urgent_out_of_order_return_per_channel_pixel_only_bytes = %d\n", soc->urgent_out_of_order_return_per_channel_pixel_only_bytes);
|
||||
dml_print("DML: soc_bbox: urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = %d\n", soc->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
|
||||
dml_print("DML: soc_bbox: urgent_out_of_order_return_per_channel_vm_only_bytes = %d\n", soc->urgent_out_of_order_return_per_channel_vm_only_bytes);
|
||||
dml_print("DML: soc_bbox: num_chans = %d\n", soc->num_chans);
|
||||
dml_print("DML: soc_bbox: return_bus_width_bytes = %d\n", soc->return_bus_width_bytes);
|
||||
dml_print("DML: soc_bbox: dram_channel_width_bytes = %d\n", soc->dram_channel_width_bytes);
|
||||
dml_print("DML: soc_bbox: fabric_datapath_to_dcn_data_return_bytes = %d\n", soc->fabric_datapath_to_dcn_data_return_bytes);
|
||||
dml_print("DML: soc_bbox: hostvm_min_page_size_kbytes = %d\n", soc->hostvm_min_page_size_kbytes);
|
||||
dml_print("DML: soc_bbox: gpuvm_min_page_size_kbytes = %d\n", soc->gpuvm_min_page_size_kbytes);
|
||||
dml_print("DML: soc_bbox: phy_downspread_percent = %f\n", soc->phy_downspread_percent);
|
||||
dml_print("DML: soc_bbox: dcn_downspread_percent = %f\n", soc->dcn_downspread_percent);
|
||||
dml_print("DML: soc_bbox: smn_latency_us = %f\n", soc->smn_latency_us);
|
||||
dml_print("DML: soc_bbox: mall_allocated_for_dcn_mbytes = %d\n", soc->mall_allocated_for_dcn_mbytes);
|
||||
dml_print("DML: soc_bbox: dispclk_dppclk_vco_speed_mhz = %f\n", soc->dispclk_dppclk_vco_speed_mhz);
|
||||
dml_print("DML: soc_bbox: do_urgent_latency_adjustment = %d\n", soc->do_urgent_latency_adjustment);
|
||||
}
|
||||
|
||||
__DML_DLL_EXPORT__ void dml_print_clk_cfg(const struct dml_clk_cfg_st *clk_cfg)
|
||||
{
|
||||
dml_print("DML: clk_cfg: 0-use_required, 1-use pipe.clks_cfg, 2-use state bbox\n");
|
||||
dml_print("DML: clk_cfg: dcfclk_option = %d\n", clk_cfg->dcfclk_option);
|
||||
dml_print("DML: clk_cfg: dispclk_option = %d\n", clk_cfg->dispclk_option);
|
||||
|
||||
dml_print("DML: clk_cfg: dcfclk_freq_mhz = %f\n", clk_cfg->dcfclk_freq_mhz);
|
||||
dml_print("DML: clk_cfg: dispclk_freq_mhz = %f\n", clk_cfg->dispclk_freq_mhz);
|
||||
|
||||
for (dml_uint_t i = 0; i < DCN_DML__NUM_PLANE; i++) {
|
||||
dml_print("DML: clk_cfg: i=%d, dppclk_option = %d\n", i, clk_cfg->dppclk_option[i]);
|
||||
dml_print("DML: clk_cfg: i=%d, dppclk_freq_mhz = %f\n", i, clk_cfg->dppclk_freq_mhz[i]);
|
||||
}
|
||||
}
|
||||
|
||||
dml_bool_t dml_is_vertical_rotation(enum dml_rotation_angle Scan)
|
||||
{
|
||||
dml_bool_t is_vert = false;
|
||||
if (Scan == dml_rotation_90 || Scan == dml_rotation_90m || Scan == dml_rotation_270 || Scan == dml_rotation_270m) {
|
||||
is_vert = true;
|
||||
} else {
|
||||
is_vert = false;
|
||||
}
|
||||
return is_vert;
|
||||
} // dml_is_vertical_rotation
|
||||
|
||||
dml_uint_t dml_get_cursor_bit_per_pixel(enum dml_cursor_bpp ebpp)
|
||||
{
|
||||
switch (ebpp) {
|
||||
case dml_cur_2bit:
|
||||
return 2;
|
||||
case dml_cur_32bit:
|
||||
return 32;
|
||||
case dml_cur_64bit:
|
||||
return 64;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// @brief Determine the physical pipe to logical plane mapping using the display_cfg
|
||||
dml_uint_t dml_get_num_active_planes(const struct dml_display_cfg_st *display_cfg)
|
||||
{
|
||||
dml_uint_t num_active_planes = 0;
|
||||
|
||||
for (dml_uint_t k = 0; k < __DML_NUM_PLANES__; k++) {
|
||||
if (display_cfg->plane.ViewportWidth[k] > 0)
|
||||
num_active_planes = num_active_planes + 1;
|
||||
}
|
||||
#ifdef __DML_VBA_DEBUG__
|
||||
dml_print("DML::%s: num_active_planes = %d\n", __func__, num_active_planes);
|
||||
#endif
|
||||
return num_active_planes;
|
||||
}
|
||||
|
||||
/// @brief Determine the physical pipe to logical plane mapping using the display_cfg
|
||||
dml_uint_t dml_get_num_active_pipes(const struct dml_display_cfg_st *display_cfg)
|
||||
{
|
||||
dml_uint_t num_active_pipes = 0;
|
||||
|
||||
for (dml_uint_t j = 0; j < dml_get_num_active_planes(display_cfg); j++) {
|
||||
num_active_pipes = num_active_pipes + display_cfg->hw.DPPPerSurface[j];
|
||||
}
|
||||
|
||||
#ifdef __DML_VBA_DEBUG__
|
||||
dml_print("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
|
||||
#endif
|
||||
return num_active_pipes;
|
||||
}
|
||||
|
||||
dml_uint_t dml_get_plane_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t pipe_idx)
|
||||
{
|
||||
dml_uint_t plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
|
||||
return plane_idx;
|
||||
}
|
||||
|
||||
dml_uint_t dml_get_pipe_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t plane_idx)
|
||||
{
|
||||
dml_uint_t pipe_idx = 0;
|
||||
dml_bool_t pipe_found = 0;
|
||||
|
||||
ASSERT(plane_idx < __DML_NUM_PLANES__);
|
||||
|
||||
for (dml_uint_t i = 0; i < __DML_NUM_PLANES__; i++) {
|
||||
if (plane_idx == mode_lib->mp.pipe_plane[i]) {
|
||||
pipe_idx = i;
|
||||
pipe_found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
ASSERT(pipe_found != 0);
|
||||
|
||||
return pipe_idx;
|
||||
}
|
||||
|
||||
void dml_calc_pipe_plane_mapping(const struct dml_hw_resource_st *hw, dml_uint_t *pipe_plane)
|
||||
{
|
||||
dml_uint_t pipe_idx = 0;
|
||||
|
||||
for (dml_uint_t k = 0; k < __DML_NUM_PLANES__; ++k) {
|
||||
pipe_plane[k] = __DML_PIPE_NO_PLANE__;
|
||||
}
|
||||
|
||||
for (dml_uint_t plane_idx = 0; plane_idx < __DML_NUM_PLANES__; plane_idx++) {
|
||||
for (dml_uint_t i = 0; i < hw->DPPPerSurface[plane_idx]; i++) {
|
||||
pipe_plane[pipe_idx] = plane_idx;
|
||||
pipe_idx++;
|
||||
}
|
||||
}
|
||||
}
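dml_calc_pipe_plane_mapping() hands out physical pipe indices to planes in order, one pipe per DPP a plane uses, and marks the remaining entries as unused. A worked example with assumed DPPPerSurface values:

/* With hw->DPPPerSurface = { 2, 1, 0, 0, ... } (plane 0 uses two DPPs, e.g. MPC
 * combine, plane 1 uses one), consecutive pipes are assigned in plane order:
 *
 *   pipe_plane[0] = 0
 *   pipe_plane[1] = 0
 *   pipe_plane[2] = 1
 *   pipe_plane[3..7] = __DML_PIPE_NO_PLANE__ (99, i.e. unused)
 *
 * Assuming mode_lib->mp.pipe_plane holds this mapping, dml_get_pipe_idx() returns the
 * first pipe of a plane (0 for plane 0, 2 for plane 1), and dml_get_plane_idx() is the
 * inverse per-pipe lookup.
 */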
|
||||
|
||||
|
74	drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h	Normal file
@ -0,0 +1,74 @@
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DISPLAY_MODE_UTIL_H__
|
||||
#define __DISPLAY_MODE_UTIL_H__
|
||||
|
||||
#include "display_mode_core_structs.h"
|
||||
#include "cmntypes.h"
|
||||
|
||||
|
||||
#include "dml_assert.h"
|
||||
#include "dml_logging.h"
|
||||
|
||||
__DML_DLL_EXPORT__ dml_bool_t dml_util_is_420(enum dml_source_format_class source_format);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_ceil(dml_float_t x, dml_float_t granularity);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_floor(dml_float_t x, dml_float_t granularity);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_min(dml_float_t x, dml_float_t y);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_min3(dml_float_t x, dml_float_t y, dml_float_t z);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_min4(dml_float_t x, dml_float_t y, dml_float_t z, dml_float_t w);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_max(dml_float_t x, dml_float_t y);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_max3(dml_float_t x, dml_float_t y, dml_float_t z);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_max4(dml_float_t a, dml_float_t b, dml_float_t c, dml_float_t d);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_max5(dml_float_t a, dml_float_t b, dml_float_t c, dml_float_t d, dml_float_t e);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_log(dml_float_t x, dml_float_t base);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_log2(dml_float_t x);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_round(dml_float_t val, dml_bool_t bankers_rounding);
|
||||
__DML_DLL_EXPORT__ dml_float_t dml_pow(dml_float_t base, int exp);
|
||||
__DML_DLL_EXPORT__ dml_uint_t dml_round_to_multiple(dml_uint_t num, dml_uint_t multiple, dml_bool_t up);
|
||||
__DML_DLL_EXPORT__ dml_bool_t dml_is_vertical_rotation(enum dml_rotation_angle scan);
|
||||
__DML_DLL_EXPORT__ dml_uint_t dml_get_cursor_bit_per_pixel(enum dml_cursor_bpp ebpp);
|
||||
__DML_DLL_EXPORT__ void dml_print_data_rq_regs_st(const dml_display_plane_rq_regs_st *data_rq_regs);
|
||||
__DML_DLL_EXPORT__ void dml_print_rq_regs_st(const dml_display_rq_regs_st *rq_regs);
|
||||
__DML_DLL_EXPORT__ void dml_print_dlg_regs_st(const dml_display_dlg_regs_st *dlg_regs);
|
||||
__DML_DLL_EXPORT__ void dml_print_ttu_regs_st(const dml_display_ttu_regs_st *ttu_regs);
|
||||
__DML_DLL_EXPORT__ void dml_print_dml_policy(const struct dml_mode_eval_policy_st *policy);
|
||||
__DML_DLL_EXPORT__ void dml_print_mode_support(struct display_mode_lib_st *mode_lib, dml_uint_t j);
|
||||
__DML_DLL_EXPORT__ void dml_print_dml_mode_support_info(const struct dml_mode_support_info_st *support, dml_bool_t fail_only);
|
||||
__DML_DLL_EXPORT__ void dml_print_dml_display_cfg_timing(const struct dml_timing_cfg_st *timing, dml_uint_t num_plane);
|
||||
__DML_DLL_EXPORT__ void dml_print_dml_display_cfg_plane(const struct dml_plane_cfg_st *plane, dml_uint_t num_plane);
|
||||
__DML_DLL_EXPORT__ void dml_print_dml_display_cfg_surface(const struct dml_surface_cfg_st *surface, dml_uint_t num_plane);
|
||||
__DML_DLL_EXPORT__ void dml_print_dml_display_cfg_hw_resource(const struct dml_hw_resource_st *hw, dml_uint_t num_plane);
|
||||
__DML_DLL_EXPORT__ void dml_print_soc_state_bounding_box(const struct soc_state_bounding_box_st *state);
|
||||
__DML_DLL_EXPORT__ void dml_print_soc_bounding_box(const struct soc_bounding_box_st *soc);
|
||||
__DML_DLL_EXPORT__ void dml_print_clk_cfg(const struct dml_clk_cfg_st *clk_cfg);
|
||||
|
||||
__DML_DLL_EXPORT__ dml_uint_t dml_get_num_active_planes(const struct dml_display_cfg_st *display_cfg);
|
||||
__DML_DLL_EXPORT__ dml_uint_t dml_get_num_active_pipes(const struct dml_display_cfg_st *display_cfg);
|
||||
__DML_DLL_EXPORT__ dml_uint_t dml_get_plane_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t pipe_idx);
|
||||
__DML_DLL_EXPORT__ dml_uint_t dml_get_pipe_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t plane_idx);
|
||||
__DML_DLL_EXPORT__ void dml_calc_pipe_plane_mapping(const struct dml_hw_resource_st *hw, dml_uint_t *pipe_plane);
|
||||
|
||||
|
||||
#endif
|
734
drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
Normal file
|
@ -0,0 +1,734 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "dml2_mall_phantom.h"
|
||||
|
||||
#include "dml2_dc_types.h"
|
||||
#include "dml2_internal_types.h"
|
||||
#include "dml2_utils.h"
|
||||
#include "dml2_dc_resource_mgmt.h"
|
||||
|
||||
#define MAX_ODM_FACTOR 4
|
||||
#define MAX_MPCC_FACTOR 4
|
||||
|
||||
struct dc_plane_pipe_pool {
|
||||
int pipes_assigned_to_plane[MAX_ODM_FACTOR][MAX_MPCC_FACTOR];
|
||||
bool pipe_used[MAX_ODM_FACTOR][MAX_MPCC_FACTOR];
|
||||
int num_pipes_assigned_to_plane_for_mpcc_combine;
|
||||
int num_pipes_assigned_to_plane_for_odm_combine;
|
||||
};
|
||||
|
||||
struct dc_pipe_mapping_scratch {
|
||||
struct {
|
||||
unsigned int odm_factor;
|
||||
unsigned int odm_slice_end_x[MAX_PIPES];
|
||||
struct pipe_ctx *next_higher_pipe_for_odm_slice[MAX_PIPES];
|
||||
} odm_info;
|
||||
struct {
|
||||
unsigned int mpc_factor;
|
||||
struct pipe_ctx *prev_odm_pipe;
|
||||
} mpc_info;
|
||||
|
||||
struct dc_plane_pipe_pool pipe_pool;
|
||||
};
|
||||
|
||||
static bool get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, unsigned int *plane_id)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
if (!plane_id)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < state->stream_count; i++) {
|
||||
for (j = 0; j < state->stream_status[i].plane_count; j++) {
|
||||
if (state->stream_status[i].plane_states[j] == plane) {
|
||||
*plane_id = (i << 16) | j;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
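For reference, the plane_id computed above packs the stream index into the upper 16 bits and the plane's position within that stream into the lower 16 bits; a minimal standalone sketch of that packing (illustrative only, not part of the patch):

#include <stdio.h>

/* Mirrors the (stream_index << 16) | plane_index packing used by get_plane_id(). */
static unsigned int pack_plane_id(unsigned int stream_idx, unsigned int plane_idx)
{
	return (stream_idx << 16) | plane_idx;
}

int main(void)
{
	unsigned int id = pack_plane_id(1, 2); /* stream 1, its third plane */

	printf("plane_id=0x%08x stream=%u plane=%u\n", id, id >> 16, id & 0xffff);
	return 0;
}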
|
||||
|
||||
static int find_disp_cfg_idx_by_plane_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int plane_id)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
|
||||
if (mapping->disp_cfg_to_plane_id_valid[i] && mapping->disp_cfg_to_plane_id[i] == plane_id)
|
||||
return i;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int stream_id)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
|
||||
if (mapping->disp_cfg_to_stream_id_valid[i] && mapping->disp_cfg_to_stream_id[i] == stream_id)
|
||||
return i;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
// The master pipe of a stream is defined as the top pipe in odm slice 0
|
||||
static struct pipe_ctx *find_master_pipe_of_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int stream_id)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
if (state->res_ctx.pipe_ctx[i].stream && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) {
|
||||
if (!state->res_ctx.pipe_ctx[i].prev_odm_pipe && !state->res_ctx.pipe_ctx[i].top_pipe)
|
||||
return &state->res_ctx.pipe_ctx[i];
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct pipe_ctx *find_master_pipe_of_plane(struct dml2_context *ctx, struct dc_state *state, unsigned int plane_id)
|
||||
{
|
||||
int i;
|
||||
unsigned int plane_id_assigned_to_pipe;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(state, state->res_ctx.pipe_ctx[i].plane_state, &plane_id_assigned_to_pipe)) {
|
||||
if (plane_id_assigned_to_pipe == plane_id)
|
||||
return &state->res_ctx.pipe_ctx[i];
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx, struct dc_state *state, unsigned int plane_id, unsigned int *pipes)
|
||||
{
|
||||
int i;
|
||||
unsigned int num_found = 0;
|
||||
unsigned int plane_id_assigned_to_pipe;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(state, state->res_ctx.pipe_ctx[i].plane_state, &plane_id_assigned_to_pipe)) {
|
||||
if (plane_id_assigned_to_pipe == plane_id)
|
||||
pipes[num_found++] = i;
|
||||
}
|
||||
}
|
||||
|
||||
return num_found;
|
||||
}
|
||||
|
||||
static bool validate_pipe_assignment(const struct dml2_context *ctx, const struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, const struct dml2_dml_to_dc_pipe_mapping *mapping)
|
||||
{
|
||||
// int i, j, k;
|
||||
//
|
||||
// unsigned int plane_id;
|
||||
//
|
||||
// unsigned int disp_cfg_index;
|
||||
//
|
||||
// unsigned int pipes_assigned_to_plane[MAX_PIPES];
|
||||
// unsigned int num_pipes_assigned_to_plane;
|
||||
//
|
||||
// struct pipe_ctx *top_pipe;
|
||||
//
|
||||
// for (i = 0; i < state->stream_count; i++) {
|
||||
// for (j = 0; j < state->stream_status[i]->plane_count; j++) {
|
||||
// if (get_plane_id(state, state->stream_status.plane_states[j], &plane_id)) {
|
||||
// disp_cfg_index = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
|
||||
// num_pipes_assigned_to_plane = find_pipes_assigned_to_plane(ctx, state, plane_id, pipes_assigned_to_plane);
|
||||
//
|
||||
// if (disp_cfg_index >= 0 && num_pipes_assigned_to_plane > 0) {
|
||||
// // Verify the number of pipes assigned matches
|
||||
// if (disp_cfg->hw.DPPPerSurface != num_pipes_assigned_to_plane)
|
||||
// return false;
|
||||
//
|
||||
// top_pipe = find_top_pipe_in_tree(state->res_ctx.pipe_ctx[pipes_assigned_to_plane[0]]);
|
||||
//
|
||||
// // Verify MPC and ODM combine
|
||||
// if (disp_cfg->hw.ODMMode == dml_odm_mode_bypass) {
|
||||
// verify_combine_tree(top_pipe, state->streams[i]->stream_id, plane_id, state, false);
|
||||
// } else {
|
||||
// verify_combine_tree(top_pipe, state->streams[i]->stream_id, plane_id, state, true);
|
||||
// }
|
||||
//
|
||||
// // TODO: could also do additional verification that the pipes in tree are the same as
|
||||
// // pipes_assigned_to_plane
|
||||
// } else {
|
||||
// ASSERT(false);
|
||||
// return false;
|
||||
// }
|
||||
// } else {
|
||||
// ASSERT(false);
|
||||
// return false;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool is_plane_using_pipe(const struct pipe_ctx *pipe)
|
||||
{
|
||||
if (pipe->plane_state)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool is_pipe_free(const struct pipe_ctx *pipe)
|
||||
{
|
||||
if (!pipe->plane_state && !pipe->stream)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool find_more_pipes_for_stream(struct dml2_context *ctx,
|
||||
struct dc_state *state, // The state we want to find a free mapping in
|
||||
unsigned int stream_id, // The stream we want this pipe to drive
|
||||
int *assigned_pipes,
|
||||
int *assigned_pipe_count,
|
||||
int pipes_needed,
|
||||
const struct dc_state *existing_state) // The state (optional) that we want to minimize remapping relative to
|
||||
{
|
||||
struct pipe_ctx *pipe = NULL;
|
||||
unsigned int preferred_pipe_candidates[MAX_PIPES];
|
||||
unsigned int num_preferred_candidates = 0;
|
||||
int i;
|
||||
|
||||
if (existing_state) {
|
||||
// To minimize remapping, prioritize candidates from the existing stream
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
if (existing_state->res_ctx.pipe_ctx[i].stream && existing_state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id)
|
||||
preferred_pipe_candidates[num_preferred_candidates++] = existing_state->res_ctx.pipe_ctx[i].pipe_idx;
|
||||
}
|
||||
}
|
||||
|
||||
// First see if any of the preferred are unmapped, and choose those instead
|
||||
for (i = 0; pipes_needed > 0 && i < num_preferred_candidates; i++) {
|
||||
pipe = &state->res_ctx.pipe_ctx[preferred_pipe_candidates[i]];
|
||||
if (!is_plane_using_pipe(pipe)) {
|
||||
pipes_needed--;
|
||||
// TODO: This doesn't really make sense, pipe_idx should always be valid
|
||||
pipe->pipe_idx = preferred_pipe_candidates[i];
|
||||
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
|
||||
}
|
||||
}
|
||||
|
||||
// We like to pair pipes starting from the higher order indices for combining
|
||||
for (i = ctx->config.dcn_pipe_count - 1; pipes_needed > 0 && i >= 0; i--) {
|
||||
pipe = &state->res_ctx.pipe_ctx[i];
|
||||
if (!is_plane_using_pipe(pipe)) {
|
||||
pipes_needed--;
|
||||
// TODO: This doesn't really make sense, pipe_idx should always be valid
|
||||
pipe->pipe_idx = i;
|
||||
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT(pipes_needed <= 0); // Validation should prevent us from building a pipe context that exceeds the number of HW resources available
|
||||
|
||||
return pipes_needed <= 0;
|
||||
}
|
||||
|
||||
static bool find_more_free_pipes(struct dml2_context *ctx,
|
||||
struct dc_state *state, // The state we want to find a free mapping in
|
||||
unsigned int stream_id, // The stream we want this pipe to drive
|
||||
int *assigned_pipes,
|
||||
int *assigned_pipe_count,
|
||||
int pipes_needed,
|
||||
const struct dc_state *existing_state) // The state (optional) that we want to minimize remapping relative to
|
||||
{
|
||||
struct pipe_ctx *pipe = NULL;
|
||||
unsigned int preferred_pipe_candidates[MAX_PIPES];
|
||||
unsigned int num_preferred_candidates = 0;
|
||||
int i;
|
||||
|
||||
if (existing_state) {
|
||||
// To minimize remapping, prioritize candidates from the existing stream
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
if (existing_state->res_ctx.pipe_ctx[i].stream && existing_state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id)
|
||||
preferred_pipe_candidates[num_preferred_candidates++] = existing_state->res_ctx.pipe_ctx[i].pipe_idx;
|
||||
}
|
||||
}
|
||||
|
||||
// First see if any of the preferred are unmapped, and choose those instead
|
||||
for (i = 0; pipes_needed > 0 && i < num_preferred_candidates; i++) {
|
||||
pipe = &state->res_ctx.pipe_ctx[preferred_pipe_candidates[i]];
|
||||
if (is_pipe_free(pipe)) {
|
||||
pipes_needed--;
|
||||
// TODO: This doesn't really make sense, pipe_idx should always be valid
|
||||
pipe->pipe_idx = preferred_pipe_candidates[i];
|
||||
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
|
||||
}
|
||||
}
|
||||
|
||||
// We like to pair pipes starting from the higher order indices for combining
|
||||
for (i = ctx->config.dcn_pipe_count - 1; pipes_needed > 0 && i >= 0; i--) {
|
||||
pipe = &state->res_ctx.pipe_ctx[i];
|
||||
if (is_pipe_free(pipe)) {
|
||||
pipes_needed--;
|
||||
// TODO: This doesn't really make sense, pipe_idx should always be valid
|
||||
pipe->pipe_idx = i;
|
||||
assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx;
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT(pipes_needed == 0); // Validation should prevent us from building a pipe context that exceeds the number of HW resources available
|
||||
|
||||
return pipes_needed == 0;
|
||||
}
|
||||
|
||||
static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
|
||||
{
|
||||
bool sorted, swapped;
|
||||
unsigned int cur_index;
|
||||
unsigned int temp;
|
||||
int odm_slice_index;
|
||||
|
||||
for (odm_slice_index = 0; odm_slice_index < pipes->num_pipes_assigned_to_plane_for_odm_combine; odm_slice_index++) {
|
||||
// Sort each MPCC set
|
||||
// Un-optimized bubble sort, but that's okay for array sizes <= 6
|
||||
|
||||
if (pipes->num_pipes_assigned_to_plane_for_mpcc_combine <= 1)
|
||||
sorted = true;
|
||||
else
|
||||
sorted = false;
|
||||
|
||||
cur_index = 0;
|
||||
swapped = false;
|
||||
while (!sorted) {
|
||||
if (pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] > pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1]) {
|
||||
temp = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index];
|
||||
pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1];
|
||||
pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1] = temp;
|
||||
|
||||
swapped = true;
|
||||
}
|
||||
|
||||
cur_index++;
|
||||
|
||||
if (cur_index == pipes->num_pipes_assigned_to_plane_for_mpcc_combine - 1) {
|
||||
cur_index = 0;
|
||||
|
||||
if (swapped)
|
||||
sorted = false;
|
||||
else
|
||||
sorted = true;
|
||||
|
||||
swapped = false;
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// For example, 3840 x 2160, ODM2:1 has a slice array of [1919, 3839], meaning slice0 spans h_pixels 0->1919 and slice1 spans 1920->3839
|
||||
static void calculate_odm_slices(const struct dc_stream_state *stream, unsigned int odm_factor, unsigned int *odm_slice_end_x)
|
||||
{
|
||||
unsigned int slice_size = 0;
|
||||
int i;
|
||||
|
||||
if (odm_factor < 1 || odm_factor > 4) {
|
||||
ASSERT(false);
|
||||
return;
|
||||
}
|
||||
|
||||
slice_size = stream->src.width / odm_factor;
|
||||
|
||||
for (i = 0; i < odm_factor; i++)
|
||||
odm_slice_end_x[i] = (slice_size * (i + 1)) - 1;
|
||||
|
||||
odm_slice_end_x[odm_factor - 1] = stream->src.width - 1;
|
||||
}
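As a quick standalone check of the slice arithmetic above: a 3840-wide source at ODM 4:1 yields slice ends [959, 1919, 2879, 3839]; a minimal sketch, not part of the patch:

#include <stdio.h>

/* Illustrative restatement of calculate_odm_slices() for a bare integer width. */
static void odm_slice_ends(unsigned int src_width, unsigned int odm_factor,
			   unsigned int *end_x)
{
	unsigned int slice_size = src_width / odm_factor;
	unsigned int i;

	for (i = 0; i < odm_factor; i++)
		end_x[i] = slice_size * (i + 1) - 1;

	end_x[odm_factor - 1] = src_width - 1; /* last slice absorbs any remainder */
}

int main(void)
{
	unsigned int ends[4];
	unsigned int i;

	odm_slice_ends(3840, 4, ends);
	for (i = 0; i < 4; i++)
		printf("slice %u ends at x=%u\n", i, ends[i]);
	return 0;
}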
|
||||
|
||||
static bool is_plane_in_odm_slice(const struct dc_plane_state *plane, unsigned int slice_index, unsigned int *odm_slice_end_x, unsigned int num_slices)
|
||||
{
|
||||
unsigned int slice_start_x, slice_end_x;
|
||||
|
||||
if (slice_index == 0)
|
||||
slice_start_x = 0;
|
||||
else
|
||||
slice_start_x = odm_slice_end_x[slice_index - 1] + 1;
|
||||
|
||||
slice_end_x = odm_slice_end_x[slice_index];
|
||||
|
||||
if (plane->clip_rect.x + plane->clip_rect.width < slice_start_x)
|
||||
return false;
|
||||
|
||||
if (plane->clip_rect.x > slice_end_x)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void add_odm_slice_to_odm_tree(struct dml2_context *ctx,
|
||||
struct dc_state *state,
|
||||
struct dc_pipe_mapping_scratch *scratch,
|
||||
unsigned int odm_slice_index)
|
||||
{
|
||||
struct pipe_ctx *pipe = NULL;
|
||||
int i;
|
||||
|
||||
// MPCC Combine + ODM Combine is not supported, so there should never be a case where the current plane
|
||||
// has more than 1 pipe mapped to it for a given slice.
|
||||
ASSERT(scratch->pipe_pool.num_pipes_assigned_to_plane_for_mpcc_combine == 1 || scratch->pipe_pool.num_pipes_assigned_to_plane_for_odm_combine == 1);
|
||||
|
||||
for (i = 0; i < scratch->pipe_pool.num_pipes_assigned_to_plane_for_mpcc_combine; i++) {
|
||||
pipe = &state->res_ctx.pipe_ctx[scratch->pipe_pool.pipes_assigned_to_plane[odm_slice_index][i]];
|
||||
|
||||
if (scratch->mpc_info.prev_odm_pipe)
|
||||
scratch->mpc_info.prev_odm_pipe->next_odm_pipe = pipe;
|
||||
|
||||
pipe->prev_odm_pipe = scratch->mpc_info.prev_odm_pipe;
|
||||
pipe->next_odm_pipe = NULL;
|
||||
}
|
||||
scratch->mpc_info.prev_odm_pipe = pipe;
|
||||
}
|
||||
|
||||
static struct pipe_ctx *add_plane_to_blend_tree(struct dml2_context *ctx,
|
||||
struct dc_state *state,
|
||||
const struct dc_plane_state *plane,
|
||||
struct dc_plane_pipe_pool *pipe_pool,
|
||||
unsigned int odm_slice,
|
||||
struct pipe_ctx *top_pipe)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pipe_pool->num_pipes_assigned_to_plane_for_mpcc_combine; i++) {
|
||||
if (top_pipe)
|
||||
top_pipe->bottom_pipe = &state->res_ctx.pipe_ctx[pipe_pool->pipes_assigned_to_plane[odm_slice][i]];
|
||||
|
||||
pipe_pool->pipe_used[odm_slice][i] = true;
|
||||
|
||||
state->res_ctx.pipe_ctx[pipe_pool->pipes_assigned_to_plane[odm_slice][i]].top_pipe = top_pipe;
|
||||
state->res_ctx.pipe_ctx[pipe_pool->pipes_assigned_to_plane[odm_slice][i]].bottom_pipe = NULL;
|
||||
|
||||
top_pipe = &state->res_ctx.pipe_ctx[pipe_pool->pipes_assigned_to_plane[odm_slice][i]];
|
||||
}
|
||||
|
||||
// After running the above loop, the top pipe actually ends up pointing to the bottom of this MPCC combine tree, so we are actually
|
||||
// returning the bottom pipe here
|
||||
return top_pipe;
|
||||
}
|
||||
|
||||
static unsigned int find_pipes_assigned_to_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int stream_id, unsigned int *pipes)
|
||||
{
|
||||
int i;
|
||||
unsigned int num_found = 0;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
if (state->res_ctx.pipe_ctx[i].stream && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) {
|
||||
pipes[num_found++] = i;
|
||||
}
|
||||
}
|
||||
|
||||
return num_found;
|
||||
}
|
||||
|
||||
static struct pipe_ctx *assign_pipes_to_stream(struct dml2_context *ctx, struct dc_state *state,
|
||||
const struct dc_stream_state *stream,
|
||||
int odm_factor,
|
||||
struct dc_plane_pipe_pool *pipe_pool,
|
||||
const struct dc_state *existing_state)
|
||||
{
|
||||
struct pipe_ctx *master_pipe;
|
||||
unsigned int pipes_needed;
|
||||
unsigned int pipes_assigned;
|
||||
unsigned int pipes[MAX_PIPES];
|
||||
unsigned int next_pipe_to_assign;
|
||||
int odm_slice;
|
||||
|
||||
pipes_needed = odm_factor;
|
||||
|
||||
master_pipe = find_master_pipe_of_stream(ctx, state, stream->stream_id);
|
||||
ASSERT(master_pipe);
|
||||
|
||||
pipes_assigned = find_pipes_assigned_to_stream(ctx, state, stream->stream_id, pipes);
|
||||
|
||||
find_more_free_pipes(ctx, state, stream->stream_id, pipes, &pipes_assigned, pipes_needed - pipes_assigned, existing_state);
|
||||
|
||||
ASSERT(pipes_assigned == pipes_needed);
|
||||
|
||||
next_pipe_to_assign = 0;
|
||||
for (odm_slice = 0; odm_slice < odm_factor; odm_slice++)
|
||||
pipe_pool->pipes_assigned_to_plane[odm_slice][0] = pipes[next_pipe_to_assign++];
|
||||
|
||||
pipe_pool->num_pipes_assigned_to_plane_for_mpcc_combine = 1;
|
||||
pipe_pool->num_pipes_assigned_to_plane_for_odm_combine = odm_factor;
|
||||
|
||||
return master_pipe;
|
||||
}
|
||||
|
||||
static struct pipe_ctx *assign_pipes_to_plane(struct dml2_context *ctx, struct dc_state *state,
|
||||
const struct dc_stream_state *stream,
|
||||
const struct dc_plane_state *plane,
|
||||
int odm_factor,
|
||||
int mpc_factor,
|
||||
struct dc_plane_pipe_pool *pipe_pool,
|
||||
const struct dc_state *existing_state)
|
||||
{
|
||||
struct pipe_ctx *master_pipe = NULL;
|
||||
unsigned int plane_id;
|
||||
unsigned int pipes_needed;
|
||||
unsigned int pipes_assigned;
|
||||
unsigned int pipes[MAX_PIPES];
|
||||
unsigned int next_pipe_to_assign;
|
||||
int odm_slice, mpc_slice;
|
||||
|
||||
if (!get_plane_id(state, plane, &plane_id)) {
|
||||
ASSERT(false);
|
||||
return master_pipe;
|
||||
}
|
||||
|
||||
pipes_needed = mpc_factor * odm_factor;
|
||||
|
||||
master_pipe = find_master_pipe_of_plane(ctx, state, plane_id);
|
||||
ASSERT(master_pipe);
|
||||
|
||||
pipes_assigned = find_pipes_assigned_to_plane(ctx, state, plane_id, pipes);
|
||||
|
||||
find_more_pipes_for_stream(ctx, state, stream->stream_id, pipes, &pipes_assigned, pipes_needed - pipes_assigned, existing_state);
|
||||
|
||||
ASSERT(pipes_assigned >= pipes_needed);
|
||||
|
||||
next_pipe_to_assign = 0;
|
||||
for (odm_slice = 0; odm_slice < odm_factor; odm_slice++)
|
||||
for (mpc_slice = 0; mpc_slice < mpc_factor; mpc_slice++)
|
||||
pipe_pool->pipes_assigned_to_plane[odm_slice][mpc_slice] = pipes[next_pipe_to_assign++];
|
||||
|
||||
pipe_pool->num_pipes_assigned_to_plane_for_mpcc_combine = mpc_factor;
|
||||
pipe_pool->num_pipes_assigned_to_plane_for_odm_combine = odm_factor;
|
||||
|
||||
return master_pipe;
|
||||
}
|
||||
|
||||
static bool is_pipe_used(const struct dc_plane_pipe_pool *pool, unsigned int pipe_idx)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < pool->num_pipes_assigned_to_plane_for_odm_combine; i++) {
|
||||
for (j = 0; j < pool->num_pipes_assigned_to_plane_for_mpcc_combine; j++) {
|
||||
if (pool->pipes_assigned_to_plane[i][j] == pipe_idx && pool->pipe_used[i][j])
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void free_pipe(struct pipe_ctx *pipe)
|
||||
{
|
||||
memset(pipe, 0, sizeof(struct pipe_ctx));
|
||||
}
|
||||
|
||||
static void free_unused_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state, const struct dc_plane_state *plane, const struct dc_plane_pipe_pool *pool)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
if (state->res_ctx.pipe_ctx[i].plane_state == plane && !is_pipe_used(pool, state->res_ctx.pipe_ctx[i].pipe_idx)) {
|
||||
free_pipe(&state->res_ctx.pipe_ctx[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void remove_pipes_from_blend_trees(struct dml2_context *ctx, struct dc_state *state, struct dc_plane_pipe_pool *pipe_pool, unsigned int odm_slice)
|
||||
{
|
||||
struct pipe_ctx *pipe;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pipe_pool->num_pipes_assigned_to_plane_for_mpcc_combine; i++) {
|
||||
pipe = &state->res_ctx.pipe_ctx[pipe_pool->pipes_assigned_to_plane[odm_slice][0]];
|
||||
if (pipe->top_pipe)
|
||||
pipe->top_pipe->bottom_pipe = pipe->bottom_pipe;
|
||||
|
||||
if (pipe->bottom_pipe)
|
||||
pipe->bottom_pipe = pipe->top_pipe;
|
||||
|
||||
pipe_pool->pipe_used[odm_slice][i] = true;
|
||||
}
|
||||
}
|
||||
|
||||
static void map_pipes_for_stream(struct dml2_context *ctx, struct dc_state *state, const struct dc_stream_state *stream,
|
||||
struct dc_pipe_mapping_scratch *scratch, const struct dc_state *existing_state)
|
||||
{
|
||||
int odm_slice_index;
|
||||
struct pipe_ctx *master_pipe = NULL;
|
||||
|
||||
|
||||
master_pipe = assign_pipes_to_stream(ctx, state, stream, scratch->odm_info.odm_factor, &scratch->pipe_pool, existing_state);
|
||||
sort_pipes_for_splitting(&scratch->pipe_pool);
|
||||
|
||||
for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) {
|
||||
remove_pipes_from_blend_trees(ctx, state, &scratch->pipe_pool, odm_slice_index);
|
||||
|
||||
add_odm_slice_to_odm_tree(ctx, state, scratch, odm_slice_index);
|
||||
|
||||
ctx->config.callbacks.acquire_secondary_pipe_for_mpc_odm(ctx->config.callbacks.dc, state,
|
||||
master_pipe, &state->res_ctx.pipe_ctx[scratch->pipe_pool.pipes_assigned_to_plane[odm_slice_index][0]], true);
|
||||
}
|
||||
}
|
||||
|
||||
static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state, const struct dc_stream_state *stream, const struct dc_plane_state *plane,
|
||||
struct dc_pipe_mapping_scratch *scratch, const struct dc_state *existing_state)
|
||||
{
|
||||
int odm_slice_index;
|
||||
unsigned int plane_id;
|
||||
struct pipe_ctx *master_pipe = NULL;
|
||||
int i;
|
||||
|
||||
if (!get_plane_id(state, plane, &plane_id)) {
|
||||
ASSERT(false);
|
||||
return;
|
||||
}
|
||||
|
||||
master_pipe = assign_pipes_to_plane(ctx, state, stream, plane, scratch->odm_info.odm_factor, scratch->mpc_info.mpc_factor, &scratch->pipe_pool, existing_state);
|
||||
sort_pipes_for_splitting(&scratch->pipe_pool);
|
||||
|
||||
for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) {
|
||||
// We build the tree for one ODM slice at a time.
|
||||
// Each ODM slice shares a common OPP
|
||||
if (!is_plane_in_odm_slice(plane, odm_slice_index, scratch->odm_info.odm_slice_end_x, scratch->odm_info.odm_factor)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Now we have a list of all pipes to be used for this plane/stream, now setup the tree.
|
||||
scratch->odm_info.next_higher_pipe_for_odm_slice[odm_slice_index] = add_plane_to_blend_tree(ctx, state,
|
||||
plane,
|
||||
&scratch->pipe_pool,
|
||||
odm_slice_index,
|
||||
scratch->odm_info.next_higher_pipe_for_odm_slice[odm_slice_index]);
|
||||
|
||||
add_odm_slice_to_odm_tree(ctx, state, scratch, odm_slice_index);
|
||||
|
||||
for (i = 0; i < scratch->pipe_pool.num_pipes_assigned_to_plane_for_mpcc_combine; i++) {
|
||||
|
||||
ctx->config.callbacks.acquire_secondary_pipe_for_mpc_odm(ctx->config.callbacks.dc, state,
|
||||
master_pipe, &state->res_ctx.pipe_ctx[scratch->pipe_pool.pipes_assigned_to_plane[odm_slice_index][i]], true);
|
||||
}
|
||||
}
|
||||
|
||||
free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool);
|
||||
}
|
||||
|
||||
bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state)
|
||||
{
|
||||
int stream_index, plane_index, i;
|
||||
|
||||
unsigned int stream_disp_cfg_index;
|
||||
unsigned int plane_disp_cfg_index;
|
||||
|
||||
unsigned int plane_id;
|
||||
unsigned int stream_id;
|
||||
|
||||
const unsigned int *ODMMode, *DPPPerSurface;
|
||||
unsigned int odm_mode_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}, dpp_per_surface_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
|
||||
struct dc_pipe_mapping_scratch scratch;
|
||||
|
||||
if (ctx->architecture == dml2_architecture_21) {
|
||||
/*
|
||||
* Extract ODM and DPP outputs from DML2.1 and map them in an array as required for pipe mapping in dml2_map_dc_pipes.
|
||||
* As data cannot be directly extracted in const pointers, assign these arrays to const pointers before proceeding to
|
||||
* maximize the reuse of existing code. Const pointers are required because dml2.0 dml_display_cfg_st is const.
|
||||
*
|
||||
*/
|
||||
ODMMode = (const unsigned int *)odm_mode_array;
|
||||
DPPPerSurface = (const unsigned int *)dpp_per_surface_array;
|
||||
} else {
|
||||
ODMMode = (unsigned int *)disp_cfg->hw.ODMMode;
|
||||
DPPPerSurface = disp_cfg->hw.DPPPerSurface;
|
||||
}
|
||||
|
||||
for (stream_index = 0; stream_index < state->stream_count; stream_index++) {
|
||||
memset(&scratch, 0, sizeof(struct dc_pipe_mapping_scratch));
|
||||
|
||||
stream_id = state->streams[stream_index]->stream_id;
|
||||
stream_disp_cfg_index = find_disp_cfg_idx_by_stream_id(mapping, stream_id);
|
||||
|
||||
if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
|
||||
scratch.odm_info.odm_factor = 1;
|
||||
} else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
|
||||
scratch.odm_info.odm_factor = 2;
|
||||
} else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
|
||||
scratch.odm_info.odm_factor = 4;
|
||||
} else {
|
||||
ASSERT(false);
|
||||
scratch.odm_info.odm_factor = 1;
|
||||
}
|
||||
|
||||
calculate_odm_slices(state->streams[stream_index], scratch.odm_info.odm_factor, scratch.odm_info.odm_slice_end_x);
|
||||
|
||||
// If there are no planes, you still want to set up ODM...
|
||||
if (state->stream_status[stream_index].plane_count == 0) {
|
||||
map_pipes_for_stream(ctx, state, state->streams[stream_index], &scratch, existing_state);
|
||||
}
|
||||
|
||||
for (plane_index = 0; plane_index < state->stream_status[stream_index].plane_count; plane_index++) {
|
||||
// Planes are ordered top to bottom.
|
||||
if (get_plane_id(state, state->stream_status[stream_index].plane_states[plane_index], &plane_id)) {
|
||||
plane_disp_cfg_index = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
|
||||
|
||||
// Setup mpc_info for this plane
|
||||
scratch.mpc_info.prev_odm_pipe = NULL;
|
||||
if (scratch.odm_info.odm_factor == 1) {
|
||||
// If ODM combine is not in use, then the number of pipes
|
||||
// per plane is determined by MPC combine factor
|
||||
scratch.mpc_info.mpc_factor = DPPPerSurface[plane_disp_cfg_index];
|
||||
} else {
|
||||
// If ODM combine is enabled, then we use at most 1 pipe per
|
||||
// odm slice per plane, i.e. MPC combine is never used
|
||||
scratch.mpc_info.mpc_factor = 1;
|
||||
}
|
||||
|
||||
ASSERT(scratch.odm_info.odm_factor * scratch.mpc_info.mpc_factor > 0);
|
||||
|
||||
// Clear the pool assignment scratch (which is per plane)
|
||||
memset(&scratch.pipe_pool, 0, sizeof(struct dc_plane_pipe_pool));
|
||||
|
||||
map_pipes_for_plane(ctx, state, state->streams[stream_index], state->stream_status[stream_index].plane_states[plane_index], &scratch, existing_state);
|
||||
} else {
|
||||
// Plane ID cannot be generated, therefore no DML mapping can be performed.
|
||||
ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (!validate_pipe_assignment(ctx, state, disp_cfg, mapping))
|
||||
ASSERT(false);
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (pipe->plane_state) {
|
||||
if (!ctx->config.callbacks.build_scaling_params(pipe)) {
|
||||
ASSERT(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
48
drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
Normal file
@ -0,0 +1,48 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/

#ifndef __DML2_DC_RESOURCE_MGMT_H__
#define __DML2_DC_RESOURCE_MGMT_H__

#include "dml2_dc_types.h"

struct dml2_context;

/*
* dml2_map_dc_pipes - Creates a pipe linkage in dc_state based on the current display config.
* @ctx: Input dml2 context
* @state: Current dc_state to be updated.
* @disp_cfg: Current display config.
* @mapping: Pipe mapping logic structure to keep track of the pipes to be used.
*
* Based on the ODM and DPPPerSurface outputs calculated by DML for the current display
* config, create a pipe linkage in dc_state which is then used by DC core.
* Make this function generic to be used by multiple DML versions.
*
* Return: True if pipe mapping and linking is successful, false otherwise.
*/

bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state);

#endif
|
40
drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
Normal file
@ -0,0 +1,40 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/

/*
* Wrapper header for externally defined types from DC. These types come from
* dc headers when building DML2 as part of DC, but are defined here when building
* DML2 as a standalone library (such as for unit testing).
*/

#ifndef __DML2_DC_TYPES_H__
#define __DML2_DC_TYPES_H__

#include "resource.h"
#include "core_types.h"
#include "dsc.h"
#include "clk_mgr.h"

#endif //__DML2_DC_TYPES_H__
|
121
drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
Normal file
|
@ -0,0 +1,121 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DML2_INTERNAL_TYPES_H__
|
||||
#define __DML2_INTERNAL_TYPES_H__
|
||||
|
||||
#include "dml2_dc_types.h"
|
||||
#include "display_mode_core.h"
|
||||
#include "dml2_wrapper.h"
|
||||
#include "dml2_policy.h"
|
||||
|
||||
|
||||
struct dml2_wrapper_optimize_configuration_params {
|
||||
struct display_mode_lib_st *dml_core_ctx;
|
||||
struct dml2_configuration_options *config;
|
||||
struct ip_params_st *ip_params;
|
||||
struct dml_display_cfg_st *cur_display_config;
|
||||
struct dml_display_cfg_st *new_display_config;
|
||||
const struct dml_mode_support_info_st *cur_mode_support_info;
|
||||
struct dml_mode_eval_policy_st *cur_policy;
|
||||
struct dml_mode_eval_policy_st *new_policy;
|
||||
};
|
||||
|
||||
struct dml2_calculate_lowest_supported_state_for_temp_read_scratch {
|
||||
struct dml_mode_support_info_st evaluation_info;
|
||||
dml_float_t uclk_change_latencies[__DML_MAX_STATE_ARRAY_SIZE__];
|
||||
struct dml_display_cfg_st cur_display_config;
|
||||
struct dml_display_cfg_st new_display_config;
|
||||
struct dml_mode_eval_policy_st new_policy;
|
||||
struct dml_mode_eval_policy_st cur_policy;
|
||||
};
|
||||
|
||||
struct dml2_create_scratch {
|
||||
struct dml2_policy_build_synthetic_soc_states_scratch build_synthetic_socbb_scratch;
|
||||
struct soc_states_st in_states;
|
||||
};
|
||||
|
||||
struct dml2_calculate_rq_and_dlg_params_scratch {
|
||||
struct _vcs_dpi_dml_display_rq_regs_st rq_regs;
|
||||
struct _vcs_dpi_dml_display_dlg_regs_st disp_dlg_regs;
|
||||
struct _vcs_dpi_dml_display_ttu_regs_st disp_ttu_regs;
|
||||
};
|
||||
|
||||
#define __DML2_WRAPPER_MAX_STREAMS_PLANES__ 6
|
||||
|
||||
struct dml2_dml_to_dc_pipe_mapping {
|
||||
unsigned int disp_cfg_to_stream_id[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
|
||||
bool disp_cfg_to_stream_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
|
||||
unsigned int disp_cfg_to_plane_id[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
|
||||
bool disp_cfg_to_plane_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
|
||||
unsigned int dml_pipe_idx_to_stream_id[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
|
||||
bool dml_pipe_idx_to_stream_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
|
||||
unsigned int dml_pipe_idx_to_plane_id[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
|
||||
bool dml_pipe_idx_to_plane_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
|
||||
};
|
||||
|
||||
struct dml2_wrapper_scratch {
|
||||
struct dml_display_cfg_st cur_display_config;
|
||||
struct dml_display_cfg_st new_display_config;
|
||||
struct dml_mode_eval_policy_st new_policy;
|
||||
struct dml_mode_eval_policy_st cur_policy;
|
||||
struct dml_mode_support_info_st mode_support_info;
|
||||
struct dml_mode_support_ex_params_st mode_support_params;
|
||||
|
||||
struct dummy_pstate_entry dummy_pstate_table[4];
|
||||
|
||||
struct dml2_create_scratch create_scratch;
|
||||
struct dml2_calculate_lowest_supported_state_for_temp_read_scratch dml2_calculate_lowest_supported_state_for_temp_read_scratch;
|
||||
struct dml2_calculate_rq_and_dlg_params_scratch calculate_rq_and_dlg_params_scratch;
|
||||
|
||||
struct dml2_wrapper_optimize_configuration_params optimize_configuration_params;
|
||||
struct dml2_policy_build_synthetic_soc_states_params build_synthetic_socbb_params;
|
||||
|
||||
struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping;
|
||||
bool enable_flexible_pipe_mapping;
|
||||
};
|
||||
|
||||
struct dml2_helper_det_policy_scratch {
|
||||
int dpps_per_surface[MAX_PLANES];
|
||||
};
|
||||
|
||||
enum dml2_architecture {
|
||||
dml2_architecture_20,
|
||||
dml2_architecture_21
|
||||
};
|
||||
|
||||
struct dml2_context {
|
||||
enum dml2_architecture architecture;
|
||||
struct dml2_configuration_options config;
|
||||
struct dml2_helper_det_policy_scratch det_helper_scratch;
|
||||
union {
|
||||
struct {
|
||||
struct display_mode_lib_st dml_core_ctx;
|
||||
struct dml2_wrapper_scratch scratch;
|
||||
struct dcn_watermarks g6_temp_read_watermark_set;
|
||||
} v20;
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
913
drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
Normal file
|
@ -0,0 +1,913 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "dml2_dc_types.h"
|
||||
#include "dml2_internal_types.h"
|
||||
#include "dml2_utils.h"
|
||||
#include "dml2_mall_phantom.h"
|
||||
|
||||
unsigned int dml2_helper_calculate_num_ways_for_subvp(struct dml2_context *ctx, struct dc_state *context)
|
||||
{
|
||||
uint32_t num_ways = 0;
|
||||
uint32_t bytes_per_pixel = 0;
|
||||
uint32_t cache_lines_used = 0;
|
||||
uint32_t lines_per_way = 0;
|
||||
uint32_t total_cache_lines = 0;
|
||||
uint32_t bytes_in_mall = 0;
|
||||
uint32_t num_mblks = 0;
|
||||
uint32_t cache_lines_per_plane = 0;
|
||||
uint32_t i = 0;
|
||||
uint32_t mblk_width = 0;
|
||||
uint32_t mblk_height = 0;
|
||||
uint32_t full_vp_width_blk_aligned = 0;
|
||||
uint32_t mall_alloc_width_blk_aligned = 0;
|
||||
uint32_t mall_alloc_height_blk_aligned = 0;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
|
||||
|
||||
// Find the phantom pipes
|
||||
if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
|
||||
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
|
||||
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
|
||||
mblk_width = ctx->config.mall_cfg.mblk_width_pixels;
|
||||
mblk_height = bytes_per_pixel == 4 ? mblk_width = ctx->config.mall_cfg.mblk_height_4bpe_pixels : ctx->config.mall_cfg.mblk_height_8bpe_pixels;
|
||||
|
||||
/* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
|
||||
* FLOOR(vp_x_start, blk_width)
|
||||
*/
|
||||
full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
|
||||
pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) +
|
||||
(pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
|
||||
|
||||
/* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
|
||||
mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
|
||||
|
||||
/* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
|
||||
mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
|
||||
mblk_height * mblk_height + mblk_height;
|
||||
|
||||
/* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
|
||||
* full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
|
||||
* num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
|
||||
* (Should be divisible, but round up if not)
|
||||
*/
|
||||
num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
|
||||
((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
|
||||
bytes_in_mall = num_mblks * ctx->config.mall_cfg.mblk_size_bytes;
|
||||
// cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
|
||||
// (MALL is 64-byte aligned)
|
||||
cache_lines_per_plane = bytes_in_mall / ctx->config.mall_cfg.cache_line_size_bytes + 2;
|
||||
|
||||
// For DCC we must cache the meta surface, so double cache lines required
|
||||
if (pipe->plane_state->dcc.enable)
|
||||
cache_lines_per_plane *= 2;
|
||||
cache_lines_used += cache_lines_per_plane;
|
||||
}
|
||||
}
|
||||
|
||||
total_cache_lines = ctx->config.mall_cfg.max_cab_allocation_bytes / ctx->config.mall_cfg.cache_line_size_bytes;
|
||||
lines_per_way = total_cache_lines / ctx->config.mall_cfg.cache_num_ways;
|
||||
num_ways = cache_lines_used / lines_per_way;
|
||||
if (cache_lines_used % lines_per_way > 0)
|
||||
num_ways++;
|
||||
|
||||
return num_ways;
|
||||
}
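A rough worked example of the way-count arithmetic above; all MALL parameters below are assumed, hypothetical values chosen only to exercise the math, not taken from any ASIC:

#include <stdio.h>

int main(void)
{
	/* Hypothetical parameters, illustrative only. */
	unsigned int bytes_in_mall = 3 * 1024 * 1024;              /* assumed surface footprint */
	unsigned int cache_line_size_bytes = 64;                   /* assumed MALL line size    */
	unsigned int max_cab_allocation_bytes = 32 * 1024 * 1024;  /* assumed CAB size          */
	unsigned int cache_num_ways = 16;                          /* assumed associativity     */

	unsigned int cache_lines_used = bytes_in_mall / cache_line_size_bytes + 2;
	unsigned int total_cache_lines = max_cab_allocation_bytes / cache_line_size_bytes;
	unsigned int lines_per_way = total_cache_lines / cache_num_ways;
	unsigned int num_ways = cache_lines_used / lines_per_way;

	if (cache_lines_used % lines_per_way > 0)
		num_ways++;

	printf("num_ways = %u\n", num_ways); /* prints 2 for these assumed values */
	return 0;
}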
|
||||
|
||||
static void merge_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *context)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* merge pipes if necessary */
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
|
||||
|
||||
// For now merge all pipes for SubVP since pipe split case isn't supported yet
|
||||
|
||||
/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
|
||||
if (pipe->prev_odm_pipe) {
|
||||
/*split off odm pipe*/
|
||||
pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
|
||||
if (pipe->next_odm_pipe)
|
||||
pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
|
||||
|
||||
pipe->bottom_pipe = NULL;
|
||||
pipe->next_odm_pipe = NULL;
|
||||
pipe->plane_state = NULL;
|
||||
pipe->stream = NULL;
|
||||
pipe->top_pipe = NULL;
|
||||
pipe->prev_odm_pipe = NULL;
|
||||
if (pipe->stream_res.dsc)
|
||||
ctx->config.svp_pstate.callbacks.release_dsc(&context->res_ctx, ctx->config.svp_pstate.callbacks.dc->res_pool, &pipe->stream_res.dsc);
|
||||
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
|
||||
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
|
||||
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
|
||||
struct pipe_ctx *top_pipe = pipe->top_pipe;
|
||||
struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
|
||||
|
||||
top_pipe->bottom_pipe = bottom_pipe;
|
||||
if (bottom_pipe)
|
||||
bottom_pipe->top_pipe = top_pipe;
|
||||
|
||||
pipe->top_pipe = NULL;
|
||||
pipe->bottom_pipe = NULL;
|
||||
pipe->plane_state = NULL;
|
||||
pipe->stream = NULL;
|
||||
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
|
||||
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool all_pipes_have_stream_and_plane(struct dml2_context *ctx, const struct dc_state *context)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (!pipe->stream)
|
||||
continue;
|
||||
|
||||
if (!pipe->plane_state)
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool mpo_in_use(const struct dc_state *context)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < context->stream_count; i++) {
|
||||
if (context->stream_status[i].plane_count > 1)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* get_num_free_pipes: Calculate number of free pipes
|
||||
*
|
||||
* This function assumes that a "used" pipe is a pipe that has
|
||||
* both a stream and a plane assigned to it.
|
||||
*
|
||||
* @dc: current dc state
|
||||
* @context: new dc state
|
||||
*
|
||||
* Return:
|
||||
* Number of free pipes available in the context
|
||||
*/
|
||||
static unsigned int get_num_free_pipes(struct dml2_context *ctx, struct dc_state *state)
|
||||
{
|
||||
unsigned int i;
|
||||
unsigned int free_pipes = 0;
|
||||
unsigned int num_pipes = 0;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (pipe->stream && !pipe->top_pipe) {
|
||||
while (pipe) {
|
||||
num_pipes++;
|
||||
pipe = pipe->bottom_pipe;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
free_pipes = ctx->config.dcn_pipe_count - num_pipes;
|
||||
return free_pipes;
|
||||
}
|
||||
|
||||
/*
|
||||
* assign_subvp_pipe: Function to decide which pipe will use Sub-VP.
|
||||
*
|
||||
* We enter this function if we are Sub-VP capable (i.e. enough pipes available)
|
||||
* and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
|
||||
* we are forcing SubVP P-State switching on the current config.
|
||||
*
|
||||
* The number of pipes used for the chosen surface must be less than or equal to the
|
||||
* number of free pipes available.
|
||||
*
|
||||
* In general we choose surfaces with the longest frame time first (better for SubVP + VBLANK).
|
||||
* For multi-display cases the ActiveDRAMClockChangeMargin doesn't provide enough info on its own
|
||||
* for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
|
||||
* support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
|
||||
*
|
||||
* @param dc: current dc state
|
||||
* @param context: new dc state
|
||||
* @param index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
|
||||
*
|
||||
* Return:
|
||||
* True if a valid pipe assignment was found for Sub-VP. Otherwise false.
|
||||
*/
|
||||
static bool assign_subvp_pipe(struct dml2_context *ctx, struct dc_state *context, unsigned int *index)
|
||||
{
|
||||
unsigned int i, pipe_idx;
|
||||
unsigned int max_frame_time = 0;
|
||||
bool valid_assignment_found = false;
|
||||
unsigned int free_pipes = 2; //dcn32_get_num_free_pipes(dc, context);
|
||||
bool current_assignment_freesync = false;
|
||||
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
|
||||
|
||||
for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
|
||||
unsigned int num_pipes = 0;
|
||||
unsigned int refresh_rate = 0;
|
||||
|
||||
if (!pipe->stream)
|
||||
continue;
|
||||
|
||||
// Round up
|
||||
refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
|
||||
pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
|
||||
/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
|
||||
/* SubVP pipe candidate requirements:
|
||||
* - Refresh rate < 120hz
|
||||
* - Not able to switch in vactive naturally (switching in active means the
|
||||
* DET provides enough buffer to hide the P-State switch latency -- trying
|
||||
* to combine this with SubVP can cause issues with the scheduling).
|
||||
*/
|
||||
if (pipe->plane_state && !pipe->top_pipe &&
|
||||
pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 &&
|
||||
vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
|
||||
while (pipe) {
|
||||
num_pipes++;
|
||||
pipe = pipe->bottom_pipe;
|
||||
}
|
||||
|
||||
pipe = &context->res_ctx.pipe_ctx[i];
|
||||
if (num_pipes <= free_pipes) {
|
||||
struct dc_stream_state *stream = pipe->stream;
|
||||
unsigned int frame_us = (stream->timing.v_total * stream->timing.h_total /
|
||||
(double)(stream->timing.pix_clk_100hz * 100)) * 1000000;
|
||||
if (frame_us > max_frame_time && !stream->ignore_msa_timing_param) {
|
||||
*index = i;
|
||||
max_frame_time = frame_us;
|
||||
valid_assignment_found = true;
|
||||
current_assignment_freesync = false;
|
||||
/* For the 2-Freesync display case, still choose the one with the
|
||||
* longest frame time
|
||||
*/
|
||||
} else if (stream->ignore_msa_timing_param && (!valid_assignment_found ||
|
||||
(current_assignment_freesync && frame_us > max_frame_time))) {
|
||||
*index = i;
|
||||
valid_assignment_found = true;
|
||||
current_assignment_freesync = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
pipe_idx++;
|
||||
}
|
||||
return valid_assignment_found;
|
||||
}
|
||||
|
||||
/*
|
||||
* enough_pipes_for_subvp: Function to check if there are "enough" pipes for SubVP.
|
||||
*
|
||||
* This function returns true if there are enough free pipes
|
||||
* to create the required phantom pipes for any given stream
|
||||
* (that does not already have phantom pipe assigned).
|
||||
*
|
||||
* e.g. For a 2 stream config where the first stream uses one
|
||||
* pipe and the second stream uses 2 pipes (i.e. pipe split),
|
||||
* this function will return true because there is 1 remaining
|
||||
* pipe which can be used as the phantom pipe for the non pipe
|
||||
* split pipe.
|
||||
*
|
||||
* @dc: current dc state
|
||||
* @context: new dc state
|
||||
*
|
||||
* Return:
|
||||
* True if there are enough free pipes to assign phantom pipes to at least one
|
||||
* stream that does not already have phantom pipes assigned. Otherwise false.
|
||||
*/
|
||||
static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *state)
|
||||
{
|
||||
unsigned int i, split_cnt, free_pipes;
|
||||
unsigned int min_pipe_split = ctx->config.dcn_pipe_count + 1; // init as max number of pipes + 1
|
||||
bool subvp_possible = false;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
|
||||
|
||||
// Find the minimum pipe split count for non SubVP pipes
|
||||
if (pipe->stream && !pipe->top_pipe &&
|
||||
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
|
||||
split_cnt = 0;
|
||||
while (pipe) {
|
||||
split_cnt++;
|
||||
pipe = pipe->bottom_pipe;
|
||||
}
|
||||
|
||||
if (split_cnt < min_pipe_split)
|
||||
min_pipe_split = split_cnt;
|
||||
}
|
||||
}
|
||||
|
||||
free_pipes = get_num_free_pipes(ctx, state);
|
||||
|
||||
// SubVP only possible if at least one pipe is being used (i.e. free_pipes
|
||||
// should not be equal to the pipe_count)
|
||||
if (free_pipes >= min_pipe_split && free_pipes < ctx->config.dcn_pipe_count)
|
||||
subvp_possible = true;
|
||||
|
||||
return subvp_possible;
|
||||
}
|
||||
|
||||
/*
|
||||
* subvp_subvp_schedulable: Determine if SubVP + SubVP config is schedulable
|
||||
*
|
||||
* High level algorithm:
|
||||
* 1. Find longest microschedule length (in us) between the two SubVP pipes
|
||||
* 2. Check if the worst case overlap (VBLANK in middle of ACTIVE) for both
|
||||
* pipes still allows for the maximum microschedule to fit in the active
|
||||
* region for both pipes.
|
||||
*
|
||||
* @dc: current dc state
|
||||
* @context: new dc state
|
||||
*
|
||||
* Return:
|
||||
* bool - True if the SubVP + SubVP config is schedulable, false otherwise
|
||||
*/
|
||||
static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *context)
|
||||
{
|
||||
struct pipe_ctx *subvp_pipes[2];
|
||||
struct dc_stream_state *phantom = NULL;
|
||||
uint32_t microschedule_lines = 0;
|
||||
uint32_t index = 0;
|
||||
uint32_t i;
|
||||
uint32_t max_microschedule_us = 0;
|
||||
int32_t vactive1_us, vactive2_us, vblank1_us, vblank2_us;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
|
||||
uint32_t time_us = 0;
|
||||
|
||||
/* Loop to calculate the maximum microschedule time between the two SubVP pipes,
|
||||
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
|
||||
*/
|
||||
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
|
||||
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
|
||||
phantom = pipe->stream->mall_stream_config.paired_stream;
|
||||
microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
|
||||
phantom->timing.v_addressable;
|
||||
|
||||
// Round up when calculating microschedule time (+ 1 at the end)
|
||||
time_us = (microschedule_lines * phantom->timing.h_total) /
|
||||
(double)(phantom->timing.pix_clk_100hz * 100) * 1000000 +
|
||||
ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us +
|
||||
ctx->config.svp_pstate.subvp_fw_processing_delay_us + 1;
|
||||
if (time_us > max_microschedule_us)
|
||||
max_microschedule_us = time_us;
|
||||
|
||||
subvp_pipes[index] = pipe;
|
||||
index++;
|
||||
|
||||
// Maximum 2 SubVP pipes
|
||||
if (index == 2)
|
||||
break;
|
||||
}
|
||||
}
|
||||
vactive1_us = ((subvp_pipes[0]->stream->timing.v_addressable * subvp_pipes[0]->stream->timing.h_total) /
|
||||
(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
|
||||
vactive2_us = ((subvp_pipes[1]->stream->timing.v_addressable * subvp_pipes[1]->stream->timing.h_total) /
|
||||
(double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
|
||||
vblank1_us = (((subvp_pipes[0]->stream->timing.v_total - subvp_pipes[0]->stream->timing.v_addressable) *
|
||||
subvp_pipes[0]->stream->timing.h_total) /
|
||||
(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
|
||||
vblank2_us = (((subvp_pipes[1]->stream->timing.v_total - subvp_pipes[1]->stream->timing.v_addressable) *
|
||||
subvp_pipes[1]->stream->timing.h_total) /
|
||||
(double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
|
||||
|
||||
if ((vactive1_us - vblank2_us) / 2 > max_microschedule_us &&
|
||||
(vactive2_us - vblank1_us) / 2 > max_microschedule_us)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
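The conversion from line counts to microseconds above is easy to misread because pix_clk_100hz is expressed in units of 100 Hz. A standalone sketch of the same arithmetic with illustrative, phantom-sized numbers (none of these identifiers exist in the driver):

#include <stdio.h>

struct example_timing {
	unsigned int v_total;
	unsigned int v_front_porch;
	unsigned int v_addressable;
	unsigned int h_total;
	unsigned int pix_clk_100hz;	/* pixel clock in units of 100 Hz */
};

/* Microschedule = one phantom frame minus the front porch, plus the active
 * (MALL) region, converted to microseconds, plus the FW/prefetch margins. */
static double example_microschedule_us(const struct example_timing *t,
				       double prefetch_to_mall_us,
				       double fw_processing_delay_us)
{
	unsigned int lines = (t->v_total - t->v_front_porch) + t->v_addressable;
	double line_time_us = (double)t->h_total /
			      ((double)t->pix_clk_100hz * 100.0) * 1000000.0;

	/* The trailing +1 mirrors the round-up used in the driver calculation. */
	return lines * line_time_us + prefetch_to_mall_us + fw_processing_delay_us + 1.0;
}

int main(void)
{
	/* Arbitrary phantom-sized numbers, only to exercise the arithmetic. */
	struct example_timing t = { 120, 1, 100, 4400, 5940000 };

	printf("microschedule ~%.0f us\n", example_microschedule_us(&t, 30.0, 15.0));
	return 0;
}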
|
||||
|
||||
/*
 * dml2_svp_drr_schedulable: Determine if a SubVP + DRR config is schedulable
 *
 * High level algorithm:
 * 1. Get timing for the SubVP pipe, phantom pipe, and DRR pipe
 * 2. Determine the frame time for the DRR display when adding the required margin for MCLK switching
 *    (the margin is equal to the MALL region + DRR margin (500us))
 * 3. If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR VBLANK))
 *    then report the configuration as supported
 *
 * @ctx: DML2 context
 * @context: new dc state
 * @drr_timing: timing of the DRR display in the SubVP + DRR config
 *
 * Return:
 * bool - True if the SubVP + DRR config is schedulable, false otherwise
 */
|
||||
bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context, struct dc_crtc_timing *drr_timing)
|
||||
{
|
||||
bool schedulable = false;
|
||||
uint32_t i;
|
||||
struct pipe_ctx *pipe = NULL;
|
||||
struct dc_crtc_timing *main_timing = NULL;
|
||||
struct dc_crtc_timing *phantom_timing = NULL;
|
||||
int16_t prefetch_us = 0;
|
||||
int16_t mall_region_us = 0;
|
||||
int16_t drr_frame_us = 0; // nominal frame time
|
||||
int16_t subvp_active_us = 0;
|
||||
int16_t stretched_drr_us = 0;
|
||||
int16_t drr_stretched_vblank_us = 0;
|
||||
int16_t max_vblank_mallregion = 0;
|
||||
|
||||
// Find SubVP pipe
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
pipe = &context->res_ctx.pipe_ctx[i];
|
||||
|
||||
// We check for master pipe, but it shouldn't matter since we only need
|
||||
// the pipe for timing info (stream should be same for any pipe splits)
|
||||
if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
|
||||
continue;
|
||||
|
||||
// Find the SubVP pipe
|
||||
if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
|
||||
break;
|
||||
}
|
||||
|
||||
main_timing = &pipe->stream->timing;
|
||||
phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
|
||||
prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
|
||||
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
|
||||
ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us;
|
||||
subvp_active_us = main_timing->v_addressable * main_timing->h_total /
|
||||
(double)(main_timing->pix_clk_100hz * 100) * 1000000;
|
||||
drr_frame_us = drr_timing->v_total * drr_timing->h_total /
|
||||
(double)(drr_timing->pix_clk_100hz * 100) * 1000000;
|
||||
// P-State allow width and FW delays are already included in phantom_timing->v_addressable
|
||||
mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
|
||||
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
|
||||
stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
|
||||
drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
|
||||
(double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
|
||||
max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
|
||||
|
||||
/* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
|
||||
* highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
|
||||
* for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
|
||||
* and the max of (VBLANK blanking time, MALL region)).
|
||||
*/
|
||||
if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
|
||||
subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
|
||||
schedulable = true;
|
||||
|
||||
return schedulable;
|
||||
}
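Reduced to plain microsecond arithmetic, the static check above looks like the sketch below. The values and the EXAMPLE_DRR_MARGIN_US stand-in are illustrative; the driver uses SUBVP_DRR_MARGIN_US and values derived from the actual timings:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_DRR_MARGIN_US 100.0	/* stand-in for SUBVP_DRR_MARGIN_US */

static bool example_svp_drr_schedulable(double subvp_active_us, double prefetch_us,
					double mall_region_us, double drr_frame_us,
					double drr_stretched_vblank_us,
					double min_refresh_hz)
{
	/* Stretch the DRR frame so it can hide the MALL region plus a margin. */
	double stretched_drr_us = drr_frame_us + mall_region_us + EXAMPLE_DRR_MARGIN_US;
	/* Longest frame the panel's minimum refresh rate allows. */
	double max_frame_us = 1000000.0 / min_refresh_hz;
	double max_vblank_mall_us = drr_stretched_vblank_us > mall_region_us ?
				    drr_stretched_vblank_us : mall_region_us;

	return stretched_drr_us < max_frame_us &&
	       subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mall_us > 0.0;
}

int main(void)
{
	/* Arbitrary values, only to exercise the inequality. */
	printf("%d\n", example_svp_drr_schedulable(16000.0, 700.0, 300.0,
						   6944.0, 900.0, 48.0));
	return 0;
}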
|
||||
|
||||
|
||||
/*
 * subvp_vblank_schedulable: Determine if a SubVP + VBLANK config is schedulable
 *
 * High level algorithm:
 * 1. Get timing for the SubVP pipe, phantom pipe, and VBLANK pipe
 * 2. If (SubVP Active - Prefetch > Vblank Frame Time + max(MALL region, Vblank blanking time))
 *    then report the configuration as supported
 * 3. If the VBLANK display is DRR, then take the DRR static schedulability path
 *
 * @ctx: DML2 context
 * @context: new dc state
 *
 * Return:
 * bool - True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
 */
|
||||
static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *context)
|
||||
{
|
||||
struct pipe_ctx *pipe = NULL;
|
||||
struct pipe_ctx *subvp_pipe = NULL;
|
||||
bool found = false;
|
||||
bool schedulable = false;
|
||||
uint32_t i = 0;
|
||||
uint8_t vblank_index = 0;
|
||||
uint16_t prefetch_us = 0;
|
||||
uint16_t mall_region_us = 0;
|
||||
uint16_t vblank_frame_us = 0;
|
||||
uint16_t subvp_active_us = 0;
|
||||
uint16_t vblank_blank_us = 0;
|
||||
uint16_t max_vblank_mallregion = 0;
|
||||
struct dc_crtc_timing *main_timing = NULL;
|
||||
struct dc_crtc_timing *phantom_timing = NULL;
|
||||
struct dc_crtc_timing *vblank_timing = NULL;
|
||||
|
||||
/* For SubVP + VBLANK/DRR cases, we assume there can only be
 * a single VBLANK/DRR display. If DML reports that SubVP + VBLANK
 * is supported, it is either a single VBLANK case or two VBLANK
 * displays which are synchronized (in which case they have identical
 * timings).
 */
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
pipe = &context->res_ctx.pipe_ctx[i];
|
||||
|
||||
// We check for master pipe, but it shouldn't matter since we only need
|
||||
// the pipe for timing info (stream should be same for any pipe splits)
|
||||
if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
|
||||
continue;
|
||||
|
||||
if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
|
||||
// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
|
||||
vblank_index = i;
|
||||
found = true;
|
||||
}
|
||||
|
||||
if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
|
||||
subvp_pipe = pipe;
|
||||
}
|
||||
// Use ignore_msa_timing_param flag to identify as DRR
|
||||
if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) {
|
||||
// SUBVP + DRR case
|
||||
schedulable = dml2_svp_drr_schedulable(ctx, context, &context->res_ctx.pipe_ctx[vblank_index].stream->timing);
|
||||
} else if (found) {
|
||||
main_timing = &subvp_pipe->stream->timing;
|
||||
phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
|
||||
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
|
||||
// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
// Also include the prefetch end to MALL start delay time
|
||||
prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
|
||||
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
|
||||
ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us;
|
||||
// P-State allow width and FW delays are already included in phantom_timing->v_addressable
|
||||
mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
|
||||
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
|
||||
vblank_frame_us = vblank_timing->v_total * vblank_timing->h_total /
|
||||
(double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
|
||||
vblank_blank_us = (vblank_timing->v_total - vblank_timing->v_addressable) * vblank_timing->h_total /
|
||||
(double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
|
||||
subvp_active_us = main_timing->v_addressable * main_timing->h_total /
|
||||
(double)(main_timing->pix_clk_100hz * 100) * 1000000;
|
||||
max_vblank_mallregion = vblank_blank_us > mall_region_us ? vblank_blank_us : mall_region_us;
|
||||
|
||||
// Schedulable if VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
|
||||
// and the max of (VBLANK blanking time, MALL region)
|
||||
// TODO: Possibly add some margin (i.e. the below conditions should be [...] > X instead of [...] > 0)
|
||||
if (subvp_active_us - prefetch_us - vblank_frame_us - max_vblank_mallregion > 0)
|
||||
schedulable = true;
|
||||
}
|
||||
return schedulable;
|
||||
}
|
||||
|
||||
/*
 * dml2_svp_validate_static_schedulability: Check which SubVP case applies and run
 * the corresponding static analysis.
 *
 * Three cases:
 * 1. SubVP + SubVP
 * 2. SubVP + VBLANK (DRR checked internally)
 * 3. SubVP + VACTIVE (currently unsupported)
 *
 * @ctx: DML2 context
 * @context: new dc state
 * @pstate_change_type: DRAM clock change support type reported by DML
 *
 * Return:
 * bool - True if statically schedulable, false otherwise
 */
|
||||
bool dml2_svp_validate_static_schedulability(struct dml2_context *ctx, struct dc_state *context, enum dml_dram_clock_change_support pstate_change_type)
|
||||
{
|
||||
bool schedulable = true; // true by default for single display case
|
||||
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
|
||||
uint32_t i, pipe_idx;
|
||||
uint8_t subvp_count = 0;
|
||||
uint8_t vactive_count = 0;
|
||||
|
||||
for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (!pipe->stream)
|
||||
continue;
|
||||
|
||||
if (pipe->plane_state && !pipe->top_pipe &&
|
||||
pipe->stream->mall_stream_config.type == SUBVP_MAIN)
|
||||
subvp_count++;
|
||||
|
||||
// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
|
||||
// switching (SubVP + VACTIVE unsupported). In situations where we force
|
||||
// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
|
||||
if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 &&
|
||||
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
|
||||
vactive_count++;
|
||||
}
|
||||
pipe_idx++;
|
||||
}
|
||||
|
||||
if (subvp_count == 2) {
|
||||
// Static schedulability check for SubVP + SubVP case
|
||||
schedulable = subvp_subvp_schedulable(ctx, context);
|
||||
} else if (pstate_change_type == dml_dram_clock_change_vblank_w_mall_sub_vp) {
|
||||
// Static schedulability check for SubVP + VBLANK case. Also handle the case where
|
||||
// DML outputs SubVP + VBLANK + VACTIVE (DML will report as SubVP + VBLANK)
|
||||
if (vactive_count > 0)
|
||||
schedulable = false;
|
||||
else
|
||||
schedulable = subvp_vblank_schedulable(ctx, context);
|
||||
} else if (pstate_change_type == dml_dram_clock_change_vactive_w_mall_sub_vp &&
|
||||
vactive_count > 0) {
|
||||
// For single display SubVP cases, DML will output dm_dram_clock_change_vactive_w_mall_sub_vp by default.
|
||||
// We tell the difference between SubVP vs. SubVP + VACTIVE by checking the vactive_count.
|
||||
// SubVP + VACTIVE currently unsupported
|
||||
schedulable = false;
|
||||
}
|
||||
return schedulable;
|
||||
}
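Ignoring the pipe iteration that produces the counts, the dispatch above can be restated as a small pure function. This is an illustrative simplification, not a drop-in replacement; the *_ok flags stand in for the two schedulability helpers above:

#include <stdbool.h>
#include <stdio.h>

enum example_pstate_type {
	EXAMPLE_VBLANK_W_MALL_SUBVP,
	EXAMPLE_VACTIVE_W_MALL_SUBVP,
	EXAMPLE_OTHER,
};

static bool example_validate_static_schedulability(unsigned int subvp_count,
						    unsigned int vactive_count,
						    enum example_pstate_type type,
						    bool subvp_subvp_ok,
						    bool subvp_vblank_ok)
{
	if (subvp_count == 2)
		return subvp_subvp_ok;				/* SubVP + SubVP */
	if (type == EXAMPLE_VBLANK_W_MALL_SUBVP)
		return vactive_count == 0 && subvp_vblank_ok;	/* SubVP + VBLANK */
	if (type == EXAMPLE_VACTIVE_W_MALL_SUBVP && vactive_count > 0)
		return false;					/* SubVP + VACTIVE unsupported */
	return true;						/* single display default */
}

int main(void)
{
	/* e.g. DML reported vblank_w_mall_sub_vp with one SubVP main and no
	 * vactive-capable planes -> falls back to the SubVP + VBLANK check. */
	printf("%d\n", example_validate_static_schedulability(1, 0,
			EXAMPLE_VBLANK_W_MALL_SUBVP, false, true));
	return 0;
}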
|
||||
|
||||
static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state *state,
|
||||
struct pipe_ctx *ref_pipe,
|
||||
struct dc_stream_state *phantom_stream,
|
||||
unsigned int dc_pipe_idx,
|
||||
unsigned int svp_height,
|
||||
unsigned int svp_vstartup)
|
||||
{
|
||||
unsigned int i, pipe_idx;
|
||||
double line_time, fp_and_sync_width_time;
|
||||
struct pipe_ctx *pipe;
|
||||
uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
|
||||
static const double cvt_rb_vblank_max = ((double) 460 / (1000 * 1000));
|
||||
|
||||
// Find DML pipe index (pipe_idx) using dc_pipe_idx
|
||||
for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
pipe = &state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (!pipe->stream)
|
||||
continue;
|
||||
|
||||
if (i == dc_pipe_idx)
|
||||
break;
|
||||
|
||||
pipe_idx++;
|
||||
}
|
||||
|
||||
// Calculate lines required for pstate allow width and FW processing delays
|
||||
pstate_width_fw_delay_lines = ((double)(ctx->config.svp_pstate.subvp_fw_processing_delay_us +
|
||||
ctx->config.svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
|
||||
(ref_pipe->stream->timing.pix_clk_100hz * 100) /
|
||||
(double)ref_pipe->stream->timing.h_total;
|
||||
|
||||
// DML calculation for MALL region doesn't take into account FW delay
|
||||
// and required pstate allow width for multi-display cases
|
||||
/* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned
|
||||
* to 2 swaths (i.e. 16 lines)
|
||||
*/
|
||||
phantom_vactive = svp_height + pstate_width_fw_delay_lines + ctx->config.svp_pstate.subvp_swath_height_margin_lines;
|
||||
|
||||
phantom_stream->timing.v_front_porch = 1;
|
||||
|
||||
line_time = phantom_stream->timing.h_total / ((double)phantom_stream->timing.pix_clk_100hz * 100);
|
||||
fp_and_sync_width_time = (phantom_stream->timing.v_front_porch + phantom_stream->timing.v_sync_width) * line_time;
|
||||
|
||||
if ((svp_vstartup * line_time) + fp_and_sync_width_time > cvt_rb_vblank_max) {
|
||||
svp_vstartup = (cvt_rb_vblank_max - fp_and_sync_width_time) / line_time;
|
||||
}
|
||||
|
||||
// For backporch of phantom pipe, use vstartup of the main pipe
|
||||
phantom_bp = svp_vstartup;
|
||||
|
||||
phantom_stream->dst.y = 0;
|
||||
phantom_stream->dst.height = phantom_vactive;
|
||||
phantom_stream->src.y = 0;
|
||||
phantom_stream->src.height = phantom_vactive;
|
||||
|
||||
phantom_stream->timing.v_addressable = phantom_vactive;
|
||||
|
||||
phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
|
||||
phantom_stream->timing.v_front_porch +
|
||||
phantom_stream->timing.v_sync_width +
|
||||
phantom_bp;
|
||||
phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
|
||||
}
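A condensed sketch of the phantom timing assembly performed above: vstartup is clamped so the front porch, sync width and vstartup together stay inside the 460 us CVT-RB vblank limit, and the clamped vstartup becomes the phantom back porch. Field and function names below are simplified stand-ins, not the driver's structures:

#include <stdio.h>

struct example_phantom_timing {
	unsigned int v_addressable, v_front_porch, v_sync_width, v_back_porch, v_total;
};

static void example_build_phantom_timing(struct example_phantom_timing *t,
					 unsigned int phantom_vactive,
					 unsigned int svp_vstartup,
					 double line_time_s)
{
	static const double cvt_rb_vblank_max_s = 460.0 / (1000.0 * 1000.0);
	double fp_and_sync_s;

	t->v_addressable = phantom_vactive;
	t->v_front_porch = 1;
	t->v_sync_width = 8;	/* arbitrary example value */

	fp_and_sync_s = (t->v_front_porch + t->v_sync_width) * line_time_s;

	/* Clamp vstartup so the whole phantom vblank stays within the CVT-RB limit. */
	if (svp_vstartup * line_time_s + fp_and_sync_s > cvt_rb_vblank_max_s)
		svp_vstartup = (unsigned int)((cvt_rb_vblank_max_s - fp_and_sync_s) / line_time_s);

	/* The main pipe's vstartup is reused as the phantom back porch. */
	t->v_back_porch = svp_vstartup;
	t->v_total = t->v_addressable + t->v_front_porch + t->v_sync_width + t->v_back_porch;
}

int main(void)
{
	struct example_phantom_timing t;

	example_build_phantom_timing(&t, 160, 80, 7.4e-6);	/* ~7.4 us per line */
	printf("phantom v_total = %u\n", t.v_total);
	return 0;
}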
|
||||
|
||||
static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int dc_pipe_idx, unsigned int svp_height, unsigned int vstartup)
|
||||
{
|
||||
struct pipe_ctx *ref_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx];
|
||||
struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_stream_for_sink(ref_pipe->stream->sink);
|
||||
|
||||
phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
|
||||
phantom_stream->dpms_off = true;
|
||||
phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
|
||||
phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
|
||||
ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
|
||||
ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
|
||||
|
||||
/* stream has limited viewport and small timing */
|
||||
memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
|
||||
memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src));
|
||||
memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
|
||||
set_phantom_stream_timing(ctx, state, ref_pipe, phantom_stream, dc_pipe_idx, svp_height, vstartup);
|
||||
|
||||
ctx->config.svp_pstate.callbacks.add_stream_to_ctx(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
|
||||
return phantom_stream;
|
||||
}
|
||||
|
||||
static void enable_phantom_plane(struct dml2_context *ctx,
|
||||
struct dc_state *state,
|
||||
struct dc_stream_state *phantom_stream,
|
||||
unsigned int dc_pipe_idx)
|
||||
{
|
||||
struct dc_plane_state *phantom_plane = NULL;
|
||||
struct dc_plane_state *prev_phantom_plane = NULL;
|
||||
struct pipe_ctx *curr_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx];
|
||||
|
||||
while (curr_pipe) {
|
||||
if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) {
|
||||
phantom_plane = prev_phantom_plane;
|
||||
} else {
|
||||
phantom_plane = ctx->config.svp_pstate.callbacks.create_plane(ctx->config.svp_pstate.callbacks.dc);
|
||||
}
|
||||
|
||||
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
|
||||
memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
|
||||
sizeof(phantom_plane->scaling_quality));
|
||||
memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect));
|
||||
memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect));
|
||||
memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect));
|
||||
memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size,
|
||||
sizeof(phantom_plane->plane_size));
|
||||
memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info,
|
||||
sizeof(phantom_plane->tiling_info));
|
||||
memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc));
|
||||
//phantom_plane->tiling_info.gfx10compatible.compat_level = curr_pipe->plane_state->tiling_info.gfx10compatible.compat_level;
|
||||
phantom_plane->format = curr_pipe->plane_state->format;
|
||||
phantom_plane->rotation = curr_pipe->plane_state->rotation;
|
||||
phantom_plane->visible = curr_pipe->plane_state->visible;
|
||||
|
||||
/* Shadow pipe has small viewport. */
|
||||
phantom_plane->clip_rect.y = 0;
|
||||
phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
|
||||
|
||||
phantom_plane->is_phantom = true;
|
||||
|
||||
ctx->config.svp_pstate.callbacks.add_plane_to_context(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state);
|
||||
|
||||
curr_pipe = curr_pipe->bottom_pipe;
|
||||
prev_phantom_plane = phantom_plane;
|
||||
}
|
||||
}
|
||||
|
||||
static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_state *state, unsigned int main_pipe_idx, unsigned int svp_height, unsigned int vstartup)
|
||||
{
|
||||
struct dc_stream_state *phantom_stream = NULL;
|
||||
unsigned int i;
|
||||
|
||||
// The index of the DC pipe passed into this function is guaranteed to
// be a valid candidate for SubVP (i.e. has a plane, stream, doesn't
// already have a phantom pipe assigned, etc.) by previous checks.
|
||||
phantom_stream = enable_phantom_stream(ctx, state, main_pipe_idx, svp_height, vstartup);
|
||||
enable_phantom_plane(ctx, state, phantom_stream, main_pipe_idx);
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
|
||||
|
||||
// Build scaling params for phantom pipes which were newly added.
|
||||
// We determine which phantom pipes were added by comparing with
|
||||
// the phantom stream.
|
||||
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream &&
|
||||
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
|
||||
pipe->stream->use_dynamic_meta = false;
|
||||
pipe->plane_state->flip_immediate = false;
|
||||
if (!ctx->config.svp_pstate.callbacks.build_scaling_params(pipe)) {
|
||||
// Log / remove phantom pipes since failed to build scaling params
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context)
|
||||
{
|
||||
int i, old_plane_count;
|
||||
struct dc_stream_status *stream_status = NULL;
|
||||
struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
|
||||
|
||||
for (i = 0; i < context->stream_count; i++)
|
||||
if (context->streams[i] == stream) {
|
||||
stream_status = &context->stream_status[i];
|
||||
break;
|
||||
}
|
||||
|
||||
if (stream_status == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
old_plane_count = stream_status->plane_count;
|
||||
|
||||
for (i = 0; i < old_plane_count; i++)
|
||||
del_planes[i] = stream_status->plane_states[i];
|
||||
|
||||
for (i = 0; i < old_plane_count; i++)
|
||||
if (!ctx->config.svp_pstate.callbacks.remove_plane_from_context(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state *state)
|
||||
{
|
||||
int i;
|
||||
bool removed_pipe = false;
|
||||
struct dc_plane_state *phantom_plane = NULL;
|
||||
struct dc_stream_state *phantom_stream = NULL;
|
||||
|
||||
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
|
||||
// build scaling params for phantom pipes
|
||||
if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
|
||||
phantom_plane = pipe->plane_state;
|
||||
phantom_stream = pipe->stream;
|
||||
|
||||
remove_all_planes_for_stream(ctx, pipe->stream, state);
|
||||
ctx->config.svp_pstate.callbacks.remove_stream_from_ctx(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream);
|
||||
|
||||
/* Ref count is incremented on allocation and also when added to the context.
 * Therefore we must call release for the phantom plane and stream once
 * they are removed from the ctx to finally decrement the refcount to 0 and free them.
 */
|
||||
ctx->config.svp_pstate.callbacks.plane_state_release(phantom_plane);
|
||||
ctx->config.svp_pstate.callbacks.stream_release(phantom_stream);
|
||||
|
||||
removed_pipe = true;
|
||||
}
|
||||
|
||||
// Clear all phantom stream info
|
||||
if (pipe->stream) {
|
||||
pipe->stream->mall_stream_config.type = SUBVP_NONE;
|
||||
pipe->stream->mall_stream_config.paired_stream = NULL;
|
||||
}
|
||||
|
||||
if (pipe->plane_state) {
|
||||
pipe->plane_state->is_phantom = false;
|
||||
}
|
||||
}
|
||||
return removed_pipe;
|
||||
}
|
||||
|
||||
|
||||
/* Conditions for setting up phantom pipes for SubVP:
|
||||
* 1. Not force disable SubVP
|
||||
* 2. Full update (i.e. !fast_validate)
|
||||
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
|
||||
* 4. Display configuration passes validation
|
||||
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
|
||||
*/
|
||||
bool dml2_svp_add_phantom_pipe_to_dc_state(struct dml2_context *ctx, struct dc_state *state, struct dml_mode_support_info_st *mode_support_info)
|
||||
{
|
||||
unsigned int dc_pipe_idx, dml_pipe_idx;
|
||||
unsigned int svp_height, vstartup;
|
||||
|
||||
if (ctx->config.svp_pstate.force_disable_subvp)
|
||||
return false;
|
||||
|
||||
if (!all_pipes_have_stream_and_plane(ctx, state))
|
||||
return false;
|
||||
|
||||
if (mpo_in_use(state))
|
||||
return false;
|
||||
|
||||
merge_pipes_for_subvp(ctx, state);
|
||||
// to re-initialize viewport after the pipe merge
|
||||
for (int i = 0; i < ctx->config.dcn_pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (!pipe_ctx->plane_state || !pipe_ctx->stream)
|
||||
continue;
|
||||
|
||||
ctx->config.svp_pstate.callbacks.build_scaling_params(pipe_ctx);
|
||||
}
|
||||
|
||||
if (enough_pipes_for_subvp(ctx, state) && assign_subvp_pipe(ctx, state, &dc_pipe_idx)) {
|
||||
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(ctx, state->res_ctx.pipe_ctx[dc_pipe_idx].stream->stream_id);
|
||||
svp_height = mode_support_info->SubViewportLinesNeededInMALL[dml_pipe_idx];
|
||||
vstartup = dml_get_vstartup_calculated(&ctx->v20.dml_core_ctx, dml_pipe_idx);
|
||||
|
||||
add_phantom_pipes_for_main_pipe(ctx, state, dc_pipe_idx, svp_height, vstartup);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
50
drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h
Normal file
|
@ -0,0 +1,50 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DML2_MALL_PHANTOM_H__
|
||||
#define __DML2_MALL_PHANTOM_H__
|
||||
|
||||
#include "dml2_dc_types.h"
|
||||
#include "display_mode_core_structs.h"
|
||||
|
||||
struct dml2_svp_helper_select_best_svp_candidate_params {
|
||||
const struct dml_display_cfg_st *dml_config;
|
||||
const struct dml_mode_support_info_st *mode_support_info;
|
||||
const unsigned int blacklist;
|
||||
unsigned int *candidate_index;
|
||||
};
|
||||
|
||||
struct dml2_context;
|
||||
|
||||
unsigned int dml2_helper_calculate_num_ways_for_subvp(struct dml2_context *ctx, struct dc_state *context);
|
||||
|
||||
bool dml2_svp_add_phantom_pipe_to_dc_state(struct dml2_context *ctx, struct dc_state *state, struct dml_mode_support_info_st *mode_support_info);
|
||||
|
||||
bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state *state);
|
||||
|
||||
bool dml2_svp_validate_static_schedulability(struct dml2_context *ctx, struct dc_state *context, enum dml_dram_clock_change_support pstate_change_type);
|
||||
|
||||
bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context, struct dc_crtc_timing *drr_timing);
|
||||
|
||||
#endif
|
301
drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
Normal file
|
@ -0,0 +1,301 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "dml2_policy.h"
|
||||
|
||||
static void get_optimal_ntuple(
|
||||
const struct soc_bounding_box_st *socbb,
|
||||
struct soc_state_bounding_box_st *entry)
|
||||
{
|
||||
if (entry->dcfclk_mhz > 0) {
|
||||
float bw_on_sdp = (float)(entry->dcfclk_mhz * socbb->return_bus_width_bytes * ((float)socbb->pct_ideal_sdp_bw_after_urgent / 100));
|
||||
|
||||
entry->fabricclk_mhz = bw_on_sdp / (socbb->return_bus_width_bytes * ((float)socbb->pct_ideal_fabric_bw_after_urgent / 100));
|
||||
entry->dram_speed_mts = bw_on_sdp / (socbb->num_chans *
|
||||
socbb->dram_channel_width_bytes * ((float)socbb->pct_ideal_dram_bw_after_urgent_pixel_only / 100));
|
||||
} else if (entry->fabricclk_mhz > 0) {
|
||||
float bw_on_fabric = (float)(entry->fabricclk_mhz * socbb->return_bus_width_bytes * ((float)socbb->pct_ideal_fabric_bw_after_urgent / 100));
|
||||
|
||||
entry->dcfclk_mhz = bw_on_fabric / (socbb->return_bus_width_bytes * ((float)socbb->pct_ideal_sdp_bw_after_urgent / 100));
|
||||
entry->dram_speed_mts = bw_on_fabric / (socbb->num_chans *
|
||||
socbb->dram_channel_width_bytes * ((float)socbb->pct_ideal_dram_bw_after_urgent_pixel_only / 100));
|
||||
} else if (entry->dram_speed_mts > 0) {
|
||||
float bw_on_dram = (float)(entry->dram_speed_mts * socbb->num_chans *
|
||||
socbb->dram_channel_width_bytes * ((float)socbb->pct_ideal_dram_bw_after_urgent_pixel_only / 100));
|
||||
|
||||
entry->fabricclk_mhz = bw_on_dram / (socbb->return_bus_width_bytes * ((float)socbb->pct_ideal_fabric_bw_after_urgent / 100));
|
||||
entry->dcfclk_mhz = bw_on_dram / (socbb->return_bus_width_bytes * ((float)socbb->pct_ideal_sdp_bw_after_urgent / 100));
|
||||
}
|
||||
}
|
||||
|
||||
static float calculate_net_bw_in_mbytes_sec(const struct soc_bounding_box_st *socbb,
|
||||
struct soc_state_bounding_box_st *entry)
|
||||
{
|
||||
float memory_bw_mbytes_sec = (float)(entry->dram_speed_mts * socbb->num_chans *
|
||||
socbb->dram_channel_width_bytes * ((float)socbb->pct_ideal_dram_bw_after_urgent_pixel_only / 100));
|
||||
|
||||
float fabric_bw_mbytes_sec = (float)(entry->fabricclk_mhz * socbb->return_bus_width_bytes * ((float)socbb->pct_ideal_fabric_bw_after_urgent / 100));
|
||||
|
||||
float sdp_bw_mbytes_sec = (float)(entry->dcfclk_mhz * socbb->return_bus_width_bytes * ((float)socbb->pct_ideal_sdp_bw_after_urgent / 100));
|
||||
|
||||
float limiting_bw_mbytes_sec = memory_bw_mbytes_sec;
|
||||
|
||||
if (fabric_bw_mbytes_sec < limiting_bw_mbytes_sec)
|
||||
limiting_bw_mbytes_sec = fabric_bw_mbytes_sec;
|
||||
|
||||
if (sdp_bw_mbytes_sec < limiting_bw_mbytes_sec)
|
||||
limiting_bw_mbytes_sec = sdp_bw_mbytes_sec;
|
||||
|
||||
return limiting_bw_mbytes_sec;
|
||||
}
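The two helpers above are two views of the same balance: get_optimal_ntuple() picks the missing clocks so SDP, fabric and DRAM all deliver the same derated bandwidth, and calculate_net_bw_in_mbytes_sec() takes the minimum of the three as the effective bandwidth of a state. A compact numeric sketch (bus widths, channel counts and derate percentages are illustrative, not the real SoC values):

#include <stdio.h>

/* Effective bandwidth of each path in MB/s, after applying its derate. */
static double sdp_bw(double dcfclk_mhz)  { return dcfclk_mhz * 64.0 * 0.90; }	/* 64B return bus, 90% */
static double fabric_bw(double fclk_mhz) { return fclk_mhz * 64.0 * 0.67; }	/* 67% ideal fabric BW */
static double dram_bw(double dram_mts)   { return dram_mts * 16 * 2.0 * 0.80; }	/* 16 chans x 2B, 80% */

int main(void)
{
	double dcfclk_mhz = 1200.0;

	/* "Optimal ntuple": solve the other two clocks so the three paths match. */
	double bw = sdp_bw(dcfclk_mhz);
	double fclk_mhz = bw / (64.0 * 0.67);
	double dram_mts = bw / (16 * 2.0 * 0.80);

	/* Net bandwidth of the state is the weakest of the three paths. */
	double net = sdp_bw(dcfclk_mhz);
	if (fabric_bw(fclk_mhz) < net)
		net = fabric_bw(fclk_mhz);
	if (dram_bw(dram_mts) < net)
		net = dram_bw(dram_mts);

	printf("fclk %.0f MHz, dram %.0f MT/s, net %.0f MB/s\n", fclk_mhz, dram_mts, net);
	return 0;
}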
|
||||
|
||||
static void insert_entry_into_table_sorted(const struct soc_bounding_box_st *socbb,
|
||||
struct soc_states_st *table,
|
||||
struct soc_state_bounding_box_st *entry)
|
||||
{
|
||||
int index = 0;
|
||||
int i = 0;
|
||||
float net_bw_of_new_state = 0;
|
||||
|
||||
get_optimal_ntuple(socbb, entry);
|
||||
|
||||
if (table->num_states == 0) {
|
||||
index = 0;
|
||||
} else {
|
||||
net_bw_of_new_state = calculate_net_bw_in_mbytes_sec(socbb, entry);
|
||||
while (net_bw_of_new_state > calculate_net_bw_in_mbytes_sec(socbb, &table->state_array[index])) {
|
||||
index++;
|
||||
if (index >= (int) table->num_states)
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = table->num_states; i > index; i--) {
|
||||
table->state_array[i] = table->state_array[i - 1];
|
||||
}
|
||||
//ASSERT(index < MAX_CLK_TABLE_SIZE);
|
||||
}
|
||||
|
||||
table->state_array[index] = *entry;
|
||||
table->state_array[index].dcfclk_mhz = (int)entry->dcfclk_mhz;
|
||||
table->state_array[index].fabricclk_mhz = (int)entry->fabricclk_mhz;
|
||||
table->state_array[index].dram_speed_mts = (int)entry->dram_speed_mts;
|
||||
table->num_states++;
|
||||
}
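insert_entry_into_table_sorted() keeps the state table ordered by that net bandwidth, shifting heavier states up one slot before writing the new entry. A generic sketch of the same insertion pattern on a plain array (types and names are illustrative):

#include <stdio.h>

#define EXAMPLE_MAX_STATES 8

struct example_state { double net_bw; };

struct example_table {
	struct example_state states[EXAMPLE_MAX_STATES];
	unsigned int num_states;
};

/* Insert keeping the array sorted by ascending net_bw. */
static void example_insert_sorted(struct example_table *t, struct example_state e)
{
	unsigned int index = 0, i;

	if (t->num_states >= EXAMPLE_MAX_STATES)
		return;	/* table full; bounds not handled in this sketch */

	while (index < t->num_states && e.net_bw > t->states[index].net_bw)
		index++;

	for (i = t->num_states; i > index; i--)
		t->states[i] = t->states[i - 1];

	t->states[index] = e;
	t->num_states++;
}

int main(void)
{
	struct example_table t = { 0 };
	double bws[] = { 40.0, 10.0, 25.0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		struct example_state e = { bws[i] };
		example_insert_sorted(&t, e);
	}
	for (i = 0; i < t.num_states; i++)
		printf("%.0f ", t.states[i].net_bw);	/* prints: 10 25 40 */
	printf("\n");
	return 0;
}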
|
||||
|
||||
static void remove_entry_from_table_at_index(struct soc_states_st *table,
|
||||
unsigned int index)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (table->num_states == 0)
|
||||
return;
|
||||
|
||||
for (i = index; i < (int) table->num_states - 1; i++) {
|
||||
table->state_array[i] = table->state_array[i + 1];
|
||||
}
|
||||
memset(&table->state_array[--table->num_states], 0, sizeof(struct soc_state_bounding_box_st));
|
||||
}
|
||||
|
||||
int dml2_policy_build_synthetic_soc_states(struct dml2_policy_build_synthetic_soc_states_scratch *s,
|
||||
struct dml2_policy_build_synthetic_soc_states_params *p)
|
||||
{
|
||||
int i, j;
|
||||
unsigned int min_fclk_mhz = p->in_states->state_array[0].fabricclk_mhz;
|
||||
unsigned int min_dcfclk_mhz = p->in_states->state_array[0].dcfclk_mhz;
|
||||
unsigned int min_socclk_mhz = p->in_states->state_array[0].socclk_mhz;
|
||||
|
||||
int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
|
||||
max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0,
|
||||
max_uclk_mhz = 0, max_socclk_mhz = 0;
|
||||
|
||||
int num_uclk_dpms = 0, num_fclk_dpms = 0;
|
||||
|
||||
for (i = 0; i < __DML_MAX_STATE_ARRAY_SIZE__; i++) {
|
||||
if (p->in_states->state_array[i].dcfclk_mhz > max_dcfclk_mhz)
|
||||
max_dcfclk_mhz = (int) p->in_states->state_array[i].dcfclk_mhz;
|
||||
if (p->in_states->state_array[i].fabricclk_mhz > max_fclk_mhz)
|
||||
max_fclk_mhz = (int) p->in_states->state_array[i].fabricclk_mhz;
|
||||
if (p->in_states->state_array[i].socclk_mhz > max_socclk_mhz)
|
||||
max_socclk_mhz = (int) p->in_states->state_array[i].socclk_mhz;
|
||||
if (p->in_states->state_array[i].dram_speed_mts > max_uclk_mhz)
|
||||
max_uclk_mhz = (int) p->in_states->state_array[i].dram_speed_mts;
|
||||
if (p->in_states->state_array[i].dispclk_mhz > max_dispclk_mhz)
|
||||
max_dispclk_mhz = (int) p->in_states->state_array[i].dispclk_mhz;
|
||||
if (p->in_states->state_array[i].dppclk_mhz > max_dppclk_mhz)
|
||||
max_dppclk_mhz = (int) p->in_states->state_array[i].dppclk_mhz;
|
||||
if (p->in_states->state_array[i].phyclk_mhz > max_phyclk_mhz)
|
||||
max_phyclk_mhz = (int)p->in_states->state_array[i].phyclk_mhz;
|
||||
if (p->in_states->state_array[i].dtbclk_mhz > max_dtbclk_mhz)
|
||||
max_dtbclk_mhz = (int)p->in_states->state_array[i].dtbclk_mhz;
|
||||
|
||||
if (p->in_states->state_array[i].fabricclk_mhz > 0)
|
||||
num_fclk_dpms++;
|
||||
if (p->in_states->state_array[i].dram_speed_mts > 0)
|
||||
num_uclk_dpms++;
|
||||
}
|
||||
|
||||
if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dppclk_mhz || !max_phyclk_mhz || !max_dtbclk_mhz)
|
||||
return -1;
|
||||
|
||||
p->out_states->num_states = 0;
|
||||
|
||||
s->entry = p->in_states->state_array[0];
|
||||
|
||||
s->entry.dispclk_mhz = max_dispclk_mhz;
|
||||
s->entry.dppclk_mhz = max_dppclk_mhz;
|
||||
s->entry.dtbclk_mhz = max_dtbclk_mhz;
|
||||
s->entry.phyclk_mhz = max_phyclk_mhz;
|
||||
|
||||
s->entry.dscclk_mhz = max_dispclk_mhz / 3;
|
||||
s->entry.phyclk_mhz = max_phyclk_mhz;
|
||||
s->entry.dtbclk_mhz = max_dtbclk_mhz;
|
||||
|
||||
// Insert all the DCFCLK STAs first
|
||||
for (i = 0; i < p->num_dcfclk_stas; i++) {
|
||||
s->entry.dcfclk_mhz = p->dcfclk_stas_mhz[i];
|
||||
s->entry.fabricclk_mhz = 0;
|
||||
s->entry.dram_speed_mts = 0;
|
||||
if (i > 0)
|
||||
s->entry.socclk_mhz = max_socclk_mhz;
|
||||
|
||||
insert_entry_into_table_sorted(p->in_bbox, p->out_states, &s->entry);
|
||||
}
|
||||
|
||||
// Insert the UCLK DPMS
|
||||
for (i = 0; i < num_uclk_dpms; i++) {
|
||||
s->entry.dcfclk_mhz = 0;
|
||||
s->entry.fabricclk_mhz = 0;
|
||||
s->entry.dram_speed_mts = p->in_states->state_array[i].dram_speed_mts;
|
||||
if (i == 0) {
|
||||
s->entry.socclk_mhz = min_socclk_mhz;
|
||||
} else {
|
||||
s->entry.socclk_mhz = max_socclk_mhz;
|
||||
}
|
||||
|
||||
insert_entry_into_table_sorted(p->in_bbox, p->out_states, &s->entry);
|
||||
}
|
||||
|
||||
// Insert FCLK DPMs (if present)
|
||||
if (num_fclk_dpms > 2) {
|
||||
for (i = 0; i < num_fclk_dpms; i++) {
|
||||
s->entry.dcfclk_mhz = 0;
|
||||
s->entry.fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz;
|
||||
s->entry.dram_speed_mts = 0;
|
||||
|
||||
insert_entry_into_table_sorted(p->in_bbox, p->out_states, &s->entry);
|
||||
}
|
||||
}
|
||||
// Add max FCLK
|
||||
else {
|
||||
s->entry.dcfclk_mhz = 0;
|
||||
s->entry.fabricclk_mhz = p->in_states->state_array[num_fclk_dpms - 1].fabricclk_mhz;
|
||||
s->entry.dram_speed_mts = 0;
|
||||
|
||||
insert_entry_into_table_sorted(p->in_bbox, p->out_states, &s->entry);
|
||||
}
|
||||
|
||||
// Remove states that require higher clocks than are supported
|
||||
for (i = p->out_states->num_states - 1; i >= 0; i--) {
|
||||
if (p->out_states->state_array[i].dcfclk_mhz > max_dcfclk_mhz ||
|
||||
p->out_states->state_array[i].fabricclk_mhz > max_fclk_mhz ||
|
||||
p->out_states->state_array[i].dram_speed_mts > max_uclk_mhz)
|
||||
remove_entry_from_table_at_index(p->out_states, i);
|
||||
}
|
||||
|
||||
// At this point, the table contains all "points of interest" based on
// DPMs from PMFW and STAs. The table is sorted by BW, and all clock
// ratios (by derate) are exact.
|
||||
|
||||
// Round up UCLK to DPMs
|
||||
for (i = p->out_states->num_states - 1; i >= 0; i--) {
|
||||
for (j = 0; j < num_uclk_dpms; j++) {
|
||||
if (p->in_states->state_array[j].dram_speed_mts >= p->out_states->state_array[i].dram_speed_mts) {
|
||||
p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[j].dram_speed_mts;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If FCLK is coarse grained, round up to the next DPM
|
||||
if (num_fclk_dpms > 2) {
|
||||
for (i = p->out_states->num_states - 1; i >= 0; i--) {
|
||||
for (j = 0; j < num_fclk_dpms; j++) {
|
||||
if (p->in_states->state_array[j].fabricclk_mhz >= p->out_states->state_array[i].fabricclk_mhz) {
|
||||
p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[j].fabricclk_mhz;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clamp to min FCLK/DCFCLK
|
||||
for (i = p->out_states->num_states - 1; i >= 0; i--) {
|
||||
if (p->out_states->state_array[i].fabricclk_mhz < min_fclk_mhz) {
|
||||
p->out_states->state_array[i].fabricclk_mhz = min_fclk_mhz;
|
||||
}
|
||||
if (p->out_states->state_array[i].dcfclk_mhz < min_dcfclk_mhz) {
|
||||
p->out_states->state_array[i].dcfclk_mhz = min_dcfclk_mhz;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove duplicate states, note duplicate states are always neighbouring since table is sorted.
|
||||
i = 0;
|
||||
while (i < (int) p->out_states->num_states - 1) {
|
||||
if (p->out_states->state_array[i].dcfclk_mhz == p->out_states->state_array[i + 1].dcfclk_mhz &&
|
||||
p->out_states->state_array[i].fabricclk_mhz == p->out_states->state_array[i + 1].fabricclk_mhz &&
|
||||
p->out_states->state_array[i].dram_speed_mts == p->out_states->state_array[i + 1].dram_speed_mts)
|
||||
remove_entry_from_table_at_index(p->out_states, i);
|
||||
else
|
||||
i++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
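A hedged sketch of how a caller might drive this builder, assuming the SoC bounding box and the PMFW-derived state table have already been populated elsewhere (e.g. by the translation helpers). The DCFCLK STA list is purely illustrative and example_build_states() is not a real driver function:

#include "dml2_policy.h"

static int example_build_states(const struct soc_bounding_box_st *bbox,
				struct soc_states_st *pmfw_states,
				struct soc_states_st *synthetic_states)
{
	struct dml2_policy_build_synthetic_soc_states_scratch scratch;
	struct dml2_policy_build_synthetic_soc_states_params params;
	int dcfclk_stas_mhz[] = { 300, 615, 906, 1324, 1564 };	/* illustrative STA list */

	params.in_bbox = bbox;
	params.in_states = pmfw_states;
	params.out_states = synthetic_states;
	params.dcfclk_stas_mhz = dcfclk_stas_mhz;
	params.num_dcfclk_stas = sizeof(dcfclk_stas_mhz) / sizeof(dcfclk_stas_mhz[0]);

	/* Returns 0 on success, -1 if the input table is missing required max clocks. */
	return dml2_policy_build_synthetic_soc_states(&scratch, &params);
}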
|
||||
|
||||
void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_mode_eval_policy_st *policy)
|
||||
{
|
||||
for (int i = 0; i < __DML_NUM_PLANES__; i++) {
|
||||
policy->MPCCombineUse[i] = dml_mpc_as_needed_for_voltage; // TOREVIEW: Is this still needed? When is MPCC useful for pstate given CRB?
|
||||
policy->ODMUse[i] = dml_odm_use_policy_combine_as_needed;
|
||||
policy->ImmediateFlipRequirement[i] = dml_immediate_flip_required;
|
||||
policy->AllowForPStateChangeOrStutterInVBlank[i] = dml_prefetch_support_uclk_fclk_and_stutter_if_possible;
|
||||
}
|
||||
|
||||
/* Change the default policy initializations as per spreadsheet. We might need to
|
||||
* review and change them later on as per Jun's earlier comments.
|
||||
*/
|
||||
policy->UseUnboundedRequesting = dml_unbounded_requesting_enable;
|
||||
policy->UseMinimumRequiredDCFCLK = false;
|
||||
policy->DRAMClockChangeRequirementFinal = true; // TOREVIEW: What does this mean?
|
||||
policy->FCLKChangeRequirementFinal = true; // TOREVIEW: What does this mean?
|
||||
policy->USRRetrainingRequiredFinal = true;
|
||||
policy->EnhancedPrefetchScheduleAccelerationFinal = true; // TOREVIEW: What does this mean?
|
||||
policy->NomDETInKByteOverrideEnable = false;
|
||||
policy->NomDETInKByteOverrideValue = 0;
|
||||
policy->DCCProgrammingAssumesScanDirectionUnknownFinal = true;
|
||||
policy->SynchronizeTimingsFinal = true;
|
||||
policy->SynchronizeDRRDisplaysForUCLKPStateChangeFinal = true;
|
||||
policy->AssumeModeSupportAtMaxPwrStateEvenDRAMClockChangeNotSupported = true; // TOREVIEW: What does this mean?
|
||||
policy->AssumeModeSupportAtMaxPwrStateEvenFClockChangeNotSupported = true; // TOREVIEW: What does this mean?
|
||||
}
|
47
drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h
Normal file
|
@ -0,0 +1,47 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DML2_POLICY_H__
|
||||
#define __DML2_POLICY_H__
|
||||
|
||||
#include "display_mode_core_structs.h"
|
||||
|
||||
struct dml2_policy_build_synthetic_soc_states_params {
|
||||
const struct soc_bounding_box_st *in_bbox;
|
||||
struct soc_states_st *in_states;
|
||||
struct soc_states_st *out_states;
|
||||
int *dcfclk_stas_mhz;
|
||||
int num_dcfclk_stas;
|
||||
};
|
||||
|
||||
struct dml2_policy_build_synthetic_soc_states_scratch {
|
||||
struct soc_state_bounding_box_st entry;
|
||||
};
|
||||
|
||||
int dml2_policy_build_synthetic_soc_states(struct dml2_policy_build_synthetic_soc_states_scratch *s,
|
||||
struct dml2_policy_build_synthetic_soc_states_params *p);
|
||||
|
||||
void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_mode_eval_policy_st *policy);
|
||||
|
||||
#endif
|
1109
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
Normal file
File diff suppressed because it is too large
|
@ -0,0 +1,39 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DML2_TRANSLATION_HELPER_H__
|
||||
#define __DML2_TRANSLATION_HELPER_H__
|
||||
|
||||
void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out);
|
||||
void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, struct soc_bounding_box_st *out);
|
||||
void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
|
||||
const struct soc_bounding_box_st *in_bbox, struct soc_states_st *out);
|
||||
void dml2_translate_ip_params(const struct dc *in_dc, struct ip_params_st *out);
|
||||
void dml2_translate_socbb_params(const struct dc *in_dc, struct soc_bounding_box_st *out);
|
||||
void dml2_translate_soc_states(const struct dc *in_dc, struct soc_states_st *out, int num_states);
|
||||
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg);
|
||||
void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs, struct _vcs_dpi_dml_display_dlg_regs_st *disp_dlg_regs, struct _vcs_dpi_dml_display_ttu_regs_st *disp_ttu_regs, struct pipe_ctx *out);
|
||||
bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe);
|
||||
|
||||
#endif //__DML2_TRANSLATION_HELPER_H__
|
452
drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
Normal file
|
@ -0,0 +1,452 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
//#include "dml2_utils.h"
|
||||
#include "display_mode_core.h"
|
||||
#include "dml_display_rq_dlg_calc.h"
|
||||
#include "dml2_internal_types.h"
|
||||
#include "dml2_translation_helper.h"
|
||||
#include "dml2_utils.h"
|
||||
|
||||
void dml2_util_copy_dml_timing(struct dml_timing_cfg_st *dml_timing_array, unsigned int dst_index, unsigned int src_index)
|
||||
{
|
||||
dml_timing_array->HTotal[dst_index] = dml_timing_array->HTotal[src_index];
|
||||
dml_timing_array->VTotal[dst_index] = dml_timing_array->VTotal[src_index];
|
||||
dml_timing_array->HBlankEnd[dst_index] = dml_timing_array->HBlankEnd[src_index];
|
||||
dml_timing_array->VBlankEnd[dst_index] = dml_timing_array->VBlankEnd[src_index];
|
||||
dml_timing_array->RefreshRate[dst_index] = dml_timing_array->RefreshRate[src_index];
|
||||
dml_timing_array->VFrontPorch[dst_index] = dml_timing_array->VFrontPorch[src_index];
|
||||
dml_timing_array->PixelClock[dst_index] = dml_timing_array->PixelClock[src_index];
|
||||
dml_timing_array->HActive[dst_index] = dml_timing_array->HActive[src_index];
|
||||
dml_timing_array->VActive[dst_index] = dml_timing_array->VActive[src_index];
|
||||
dml_timing_array->Interlace[dst_index] = dml_timing_array->Interlace[src_index];
|
||||
dml_timing_array->DRRDisplay[dst_index] = dml_timing_array->DRRDisplay[src_index];
|
||||
dml_timing_array->VBlankNom[dst_index] = dml_timing_array->VBlankNom[src_index];
|
||||
}
|
||||
|
||||
void dml2_util_copy_dml_plane(struct dml_plane_cfg_st *dml_plane_array, unsigned int dst_index, unsigned int src_index)
|
||||
{
|
||||
dml_plane_array->GPUVMMinPageSizeKBytes[dst_index] = dml_plane_array->GPUVMMinPageSizeKBytes[src_index];
|
||||
dml_plane_array->ForceOneRowForFrame[dst_index] = dml_plane_array->ForceOneRowForFrame[src_index];
|
||||
dml_plane_array->PTEBufferModeOverrideEn[dst_index] = dml_plane_array->PTEBufferModeOverrideEn[src_index];
|
||||
dml_plane_array->PTEBufferMode[dst_index] = dml_plane_array->PTEBufferMode[src_index];
|
||||
dml_plane_array->ViewportWidth[dst_index] = dml_plane_array->ViewportWidth[src_index];
|
||||
dml_plane_array->ViewportHeight[dst_index] = dml_plane_array->ViewportHeight[src_index];
|
||||
dml_plane_array->ViewportWidthChroma[dst_index] = dml_plane_array->ViewportWidthChroma[src_index];
|
||||
dml_plane_array->ViewportHeightChroma[dst_index] = dml_plane_array->ViewportHeightChroma[src_index];
|
||||
dml_plane_array->ViewportXStart[dst_index] = dml_plane_array->ViewportXStart[src_index];
|
||||
dml_plane_array->ViewportXStartC[dst_index] = dml_plane_array->ViewportXStartC[src_index];
|
||||
dml_plane_array->ViewportYStart[dst_index] = dml_plane_array->ViewportYStart[src_index];
|
||||
dml_plane_array->ViewportYStartC[dst_index] = dml_plane_array->ViewportYStartC[src_index];
|
||||
dml_plane_array->ViewportStationary[dst_index] = dml_plane_array->ViewportStationary[src_index];
|
||||
|
||||
dml_plane_array->ScalerEnabled[dst_index] = dml_plane_array->ScalerEnabled[src_index];
|
||||
dml_plane_array->HRatio[dst_index] = dml_plane_array->HRatio[src_index];
|
||||
dml_plane_array->VRatio[dst_index] = dml_plane_array->VRatio[src_index];
|
||||
dml_plane_array->HRatioChroma[dst_index] = dml_plane_array->HRatioChroma[src_index];
|
||||
dml_plane_array->VRatioChroma[dst_index] = dml_plane_array->VRatioChroma[src_index];
|
||||
dml_plane_array->HTaps[dst_index] = dml_plane_array->HTaps[src_index];
|
||||
dml_plane_array->VTaps[dst_index] = dml_plane_array->VTaps[src_index];
|
||||
dml_plane_array->HTapsChroma[dst_index] = dml_plane_array->HTapsChroma[src_index];
|
||||
dml_plane_array->VTapsChroma[dst_index] = dml_plane_array->VTapsChroma[src_index];
|
||||
dml_plane_array->LBBitPerPixel[dst_index] = dml_plane_array->LBBitPerPixel[src_index];
|
||||
|
||||
dml_plane_array->SourceScan[dst_index] = dml_plane_array->SourceScan[src_index];
|
||||
dml_plane_array->ScalerRecoutWidth[dst_index] = dml_plane_array->ScalerRecoutWidth[src_index];
|
||||
|
||||
dml_plane_array->DynamicMetadataEnable[dst_index] = dml_plane_array->DynamicMetadataEnable[src_index];
|
||||
dml_plane_array->DynamicMetadataLinesBeforeActiveRequired[dst_index] = dml_plane_array->DynamicMetadataLinesBeforeActiveRequired[src_index];
|
||||
dml_plane_array->DynamicMetadataTransmittedBytes[dst_index] = dml_plane_array->DynamicMetadataTransmittedBytes[src_index];
|
||||
dml_plane_array->DETSizeOverride[dst_index] = dml_plane_array->DETSizeOverride[src_index];
|
||||
|
||||
dml_plane_array->NumberOfCursors[dst_index] = dml_plane_array->NumberOfCursors[src_index];
|
||||
dml_plane_array->CursorWidth[dst_index] = dml_plane_array->CursorWidth[src_index];
|
||||
dml_plane_array->CursorBPP[dst_index] = dml_plane_array->CursorBPP[src_index];
|
||||
|
||||
dml_plane_array->UseMALLForStaticScreen[dst_index] = dml_plane_array->UseMALLForStaticScreen[src_index];
|
||||
dml_plane_array->UseMALLForPStateChange[dst_index] = dml_plane_array->UseMALLForPStateChange[src_index];
|
||||
|
||||
dml_plane_array->BlendingAndTiming[dst_index] = dml_plane_array->BlendingAndTiming[src_index];
|
||||
}
|
||||
|
||||
void dml2_util_copy_dml_surface(struct dml_surface_cfg_st *dml_surface_array, unsigned int dst_index, unsigned int src_index)
|
||||
{
|
||||
dml_surface_array->SurfaceTiling[dst_index] = dml_surface_array->SurfaceTiling[src_index];
|
||||
dml_surface_array->SourcePixelFormat[dst_index] = dml_surface_array->SourcePixelFormat[src_index];
|
||||
dml_surface_array->PitchY[dst_index] = dml_surface_array->PitchY[src_index];
|
||||
dml_surface_array->SurfaceWidthY[dst_index] = dml_surface_array->SurfaceWidthY[src_index];
|
||||
dml_surface_array->SurfaceHeightY[dst_index] = dml_surface_array->SurfaceHeightY[src_index];
|
||||
dml_surface_array->PitchC[dst_index] = dml_surface_array->PitchC[src_index];
|
||||
dml_surface_array->SurfaceWidthC[dst_index] = dml_surface_array->SurfaceWidthC[src_index];
|
||||
dml_surface_array->SurfaceHeightC[dst_index] = dml_surface_array->SurfaceHeightC[src_index];
|
||||
|
||||
dml_surface_array->DCCEnable[dst_index] = dml_surface_array->DCCEnable[src_index];
|
||||
dml_surface_array->DCCMetaPitchY[dst_index] = dml_surface_array->DCCMetaPitchY[src_index];
|
||||
dml_surface_array->DCCMetaPitchC[dst_index] = dml_surface_array->DCCMetaPitchC[src_index];
|
||||
|
||||
dml_surface_array->DCCRateLuma[dst_index] = dml_surface_array->DCCRateLuma[src_index];
|
||||
dml_surface_array->DCCRateChroma[dst_index] = dml_surface_array->DCCRateChroma[src_index];
|
||||
dml_surface_array->DCCFractionOfZeroSizeRequestsLuma[dst_index] = dml_surface_array->DCCFractionOfZeroSizeRequestsLuma[src_index];
|
||||
dml_surface_array->DCCFractionOfZeroSizeRequestsChroma[dst_index] = dml_surface_array->DCCFractionOfZeroSizeRequestsChroma[src_index];
|
||||
}
|
||||
|
||||
void dml2_util_copy_dml_output(struct dml_output_cfg_st *dml_output_array, unsigned int dst_index, unsigned int src_index)
|
||||
{
|
||||
dml_output_array->DSCInputBitPerComponent[dst_index] = dml_output_array->DSCInputBitPerComponent[src_index];
|
||||
dml_output_array->OutputFormat[dst_index] = dml_output_array->OutputFormat[src_index];
|
||||
dml_output_array->OutputEncoder[dst_index] = dml_output_array->OutputEncoder[src_index];
|
||||
dml_output_array->OutputMultistreamId[dst_index] = dml_output_array->OutputMultistreamId[src_index];
|
||||
dml_output_array->OutputMultistreamEn[dst_index] = dml_output_array->OutputMultistreamEn[src_index];
|
||||
dml_output_array->OutputBpp[dst_index] = dml_output_array->OutputBpp[src_index];
|
||||
dml_output_array->PixelClockBackEnd[dst_index] = dml_output_array->PixelClockBackEnd[src_index];
|
||||
dml_output_array->DSCEnable[dst_index] = dml_output_array->DSCEnable[src_index];
|
||||
dml_output_array->OutputLinkDPLanes[dst_index] = dml_output_array->OutputLinkDPLanes[src_index];
|
||||
dml_output_array->OutputLinkDPRate[dst_index] = dml_output_array->OutputLinkDPRate[src_index];
|
||||
dml_output_array->ForcedOutputLinkBPP[dst_index] = dml_output_array->ForcedOutputLinkBPP[src_index];
|
||||
dml_output_array->AudioSampleRate[dst_index] = dml_output_array->AudioSampleRate[src_index];
|
||||
dml_output_array->AudioSampleLayout[dst_index] = dml_output_array->AudioSampleLayout[src_index];
|
||||
}
|
||||
|
||||
unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, enum dml_output_encoder_class encoder, bool dsc_enabled)
|
||||
{
|
||||
switch (encoder) {
|
||||
case dml_dp:
|
||||
case dml_edp:
|
||||
return 2;
|
||||
case dml_dp2p0:
|
||||
if (dsc_enabled || force_odm_4to1)
|
||||
return 4;
|
||||
else
|
||||
return 2;
|
||||
case dml_hdmi:
|
||||
return 1;
|
||||
case dml_hdmifrl:
|
||||
if (force_odm_4to1)
|
||||
return 4;
|
||||
else
|
||||
return 2;
|
||||
default:
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
|
||||
{
|
||||
/* If this assert is hit then we have a link encoder dynamic management issue */
|
||||
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
|
||||
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
|
||||
pipe_ctx->link_res.hpo_dp_link_enc &&
|
||||
dc_is_dp_signal(pipe_ctx->stream->signal));
|
||||
}
|
||||
|
||||
bool is_dtbclk_required(const struct dc *dc, struct dc_state *context)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (!context->res_ctx.pipe_ctx[i].stream)
|
||||
continue;
|
||||
if (is_dp2p0_output_encoder(&context->res_ctx.pipe_ctx[i]))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_state *context)
|
||||
{
|
||||
context->bw_ctx.bw.dcn.clk.dispclk_khz = out_clks->dispclk_khz;
|
||||
context->bw_ctx.bw.dcn.clk.dcfclk_khz = out_clks->dcfclk_khz;
|
||||
context->bw_ctx.bw.dcn.clk.dramclk_khz = out_clks->uclk_mts / 16;
|
||||
context->bw_ctx.bw.dcn.clk.fclk_khz = out_clks->fclk_khz;
|
||||
context->bw_ctx.bw.dcn.clk.phyclk_khz = out_clks->phyclk_khz;
|
||||
context->bw_ctx.bw.dcn.clk.socclk_khz = out_clks->socclk_khz;
|
||||
context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = out_clks->ref_dtbclk_khz;
|
||||
context->bw_ctx.bw.dcn.clk.p_state_change_support = out_clks->p_state_supported;
|
||||
}
|
||||
|
||||
int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
|
||||
if (ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[i] && ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[i] == stream_id)
|
||||
return i;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int plane_id)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
|
||||
if (ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id_valid[i] && ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[i] == plane_id)
|
||||
return i;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
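/*
 * The plane_id generated below packs the stream index into the upper 16 bits and the
 * plane index into the lower 16 bits, e.g. stream 1 / plane 2 yields 0x00010002.
 */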
static bool get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, unsigned int *plane_id)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
if (!plane_id)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < state->stream_count; i++) {
|
||||
for (j = 0; j < state->stream_status[i].plane_count; j++) {
|
||||
if (state->stream_status[i].plane_states[j] == plane) {
|
||||
*plane_id = (i << 16) | j;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void populate_pipe_ctx_dlg_params_from_dml(struct pipe_ctx *pipe_ctx, struct display_mode_lib_st *mode_lib, dml_uint_t pipe_idx)
|
||||
{
|
||||
unsigned int hactive, vactive, hblank_start, vblank_start, hblank_end, vblank_end;
|
||||
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
|
||||
|
||||
hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right;
|
||||
vactive = timing->v_addressable + timing->v_border_bottom + timing->v_border_top;
|
||||
hblank_start = pipe_ctx->stream->timing.h_total - pipe_ctx->stream->timing.h_front_porch;
|
||||
vblank_start = pipe_ctx->stream->timing.v_total - pipe_ctx->stream->timing.v_front_porch;
|
||||
|
||||
hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right;
|
||||
vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
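/*
 * Worked example with an illustrative 1080p CEA timing (h_total 2200, h_addressable 1920,
 * no borders, h_front_porch 88): hblank_start = 2200 - 88 = 2112 and
 * hblank_end = 2112 - 1920 = 192, i.e. the sync width plus back porch.
 */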
|
||||
|
||||
pipe_ctx->pipe_dlg_param.vstartup_start = dml_get_vstartup_calculated(mode_lib, pipe_idx);
|
||||
pipe_ctx->pipe_dlg_param.vupdate_offset = dml_get_vupdate_offset(mode_lib, pipe_idx);
|
||||
pipe_ctx->pipe_dlg_param.vupdate_width = dml_get_vupdate_width(mode_lib, pipe_idx);
|
||||
pipe_ctx->pipe_dlg_param.vready_offset = dml_get_vready_offset(mode_lib, pipe_idx);
|
||||
|
||||
pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst;
|
||||
|
||||
pipe_ctx->pipe_dlg_param.hactive = hactive;
|
||||
pipe_ctx->pipe_dlg_param.vactive = vactive;
|
||||
pipe_ctx->pipe_dlg_param.htotal = pipe_ctx->stream->timing.h_total;
|
||||
pipe_ctx->pipe_dlg_param.vtotal = pipe_ctx->stream->timing.v_total;
|
||||
pipe_ctx->pipe_dlg_param.hblank_end = hblank_end;
|
||||
pipe_ctx->pipe_dlg_param.vblank_end = vblank_end;
|
||||
pipe_ctx->pipe_dlg_param.hblank_start = hblank_start;
|
||||
pipe_ctx->pipe_dlg_param.vblank_start = vblank_start;
|
||||
pipe_ctx->pipe_dlg_param.vfront_porch = pipe_ctx->stream->timing.v_front_porch;
|
||||
pipe_ctx->pipe_dlg_param.pixel_rate_mhz = pipe_ctx->stream->timing.pix_clk_100hz / 10000.00;
|
||||
pipe_ctx->pipe_dlg_param.refresh_rate = ((timing->pix_clk_100hz * 100) / timing->h_total) / timing->v_total;
|
||||
pipe_ctx->pipe_dlg_param.vtotal_max = pipe_ctx->stream->adjust.v_total_max;
|
||||
pipe_ctx->pipe_dlg_param.vtotal_min = pipe_ctx->stream->adjust.v_total_min;
|
||||
pipe_ctx->pipe_dlg_param.recout_height = pipe_ctx->plane_res.scl_data.recout.height;
|
||||
pipe_ctx->pipe_dlg_param.recout_width = pipe_ctx->plane_res.scl_data.recout.width;
|
||||
pipe_ctx->pipe_dlg_param.full_recout_height = pipe_ctx->plane_res.scl_data.recout.height;
|
||||
pipe_ctx->pipe_dlg_param.full_recout_width = pipe_ctx->plane_res.scl_data.recout.width;
|
||||
}
|
||||
|
||||
void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt)
|
||||
{
|
||||
unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id;
|
||||
bool unbounded_req_enabled = false;
|
||||
struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch;
|
||||
|
||||
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000;
|
||||
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
|
||||
|
||||
if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx] == dml_fclock_change_unsupported)
|
||||
context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false;
|
||||
else
|
||||
context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
|
||||
|
||||
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
|
||||
context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
|
||||
|
||||
unbounded_req_enabled = in_ctx->v20.dml_core_ctx.ms.UnboundedRequestEnabledThisState;
|
||||
|
||||
if (unbounded_req_enabled && pipe_cnt > 1) {
|
||||
// Unbounded requesting should not ever be used when more than 1 pipe is enabled.
|
||||
//ASSERT(false);
|
||||
unbounded_req_enabled = false;
|
||||
}
|
||||
|
||||
context->bw_ctx.bw.dcn.compbuf_size_kb = in_ctx->v20.dml_core_ctx.ip.config_return_buffer_size_in_kbytes;
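/* Start from the full return buffer; each active pipe's DET allocation is subtracted
 * in the loop below, and the remainder is reported as the compressed buffer size.
 */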
|
||||
|
||||
for (dc_pipe_ctx_index = 0; dc_pipe_ctx_index < pipe_cnt; dc_pipe_ctx_index++) {
|
||||
if (!context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream)
|
||||
continue;
|
||||
/* The DML2 and the DC logic of determining pipe indices are different from each other so
|
||||
* there is a need to know which DML pipe index maps to which DC pipe. The code below
|
||||
* finds a dml_pipe_index from the plane id if a plane is valid. If a plane is not valid then
|
||||
* it finds a dml_pipe_index from the stream id. */
|
||||
if (get_plane_id(context, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state, &plane_id)) {
|
||||
dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id);
|
||||
} else {
|
||||
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
|
||||
}
|
||||
|
||||
ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[dml_pipe_idx]);
|
||||
ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_pipe_idx] == context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
|
||||
|
||||
/* Use the dml_pipe_index here for the getters to fetch the correct values and dc_pipe_index in the pipe_ctx to populate them
|
||||
* at the right locations.
|
||||
*/
|
||||
populate_pipe_ctx_dlg_params_from_dml(&context->res_ctx.pipe_ctx[dc_pipe_ctx_index], &context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx);
|
||||
|
||||
if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type == SUBVP_PHANTOM) {
|
||||
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
|
||||
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = 0;
|
||||
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false;
|
||||
} else {
|
||||
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = dml_get_det_buffer_size_kbytes(&context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx);
|
||||
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = unbounded_req_enabled;
|
||||
}
|
||||
|
||||
context->bw_ctx.bw.dcn.compbuf_size_kb -= context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb;
|
||||
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_res.bw.dppclk_khz = dml_get_dppclk_calculated(&context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx) * 1000;
|
||||
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_res.bw.dppclk_khz)
|
||||
context->bw_ctx.bw.dcn.clk.dppclk_khz = context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_res.bw.dppclk_khz;
|
||||
|
||||
dml_rq_dlg_get_rq_reg(&s->rq_regs, &in_ctx->v20.dml_core_ctx, dml_pipe_idx);
|
||||
dml_rq_dlg_get_dlg_reg(&s->disp_dlg_regs, &s->disp_ttu_regs, &in_ctx->v20.dml_core_ctx, dml_pipe_idx);
|
||||
dml2_update_pipe_ctx_dchub_regs(&s->rq_regs, &s->disp_dlg_regs, &s->disp_ttu_regs, &out_new_hw_state->pipe_ctx[dc_pipe_ctx_index]);
|
||||
|
||||
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes = dml_get_surface_size_for_mall(&context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx);
|
||||
|
||||
/* Reuse MALL Allocation Sizes logic from dcn32_fpu.c */
|
||||
/* Count from active, top pipes per plane only. Only add mall_ss_size_bytes for each unique plane. */
|
||||
if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream && context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state &&
|
||||
(context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe == NULL ||
|
||||
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state != context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe->plane_state) &&
|
||||
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].prev_odm_pipe == NULL) {
|
||||
/* SS: all active surfaces stored in MALL */
|
||||
if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type != SUBVP_PHANTOM) {
|
||||
context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes;
|
||||
} else {
|
||||
/* SUBVP: phantom surfaces only stored in MALL */
|
||||
context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
|
||||
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
|
||||
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dppclk_mhz
|
||||
* 1000;
|
||||
context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dispclk_mhz
|
||||
* 1000;
|
||||
}
|
||||
|
||||
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx)
|
||||
{
|
||||
watermark->urgent_ns = dml_get_wm_urgent(dml_core_ctx) * 1000;
|
||||
watermark->cstate_pstate.cstate_enter_plus_exit_ns = dml_get_wm_stutter_enter_exit(dml_core_ctx) * 1000;
|
||||
watermark->cstate_pstate.cstate_exit_ns = dml_get_wm_stutter_exit(dml_core_ctx) * 1000;
|
||||
watermark->cstate_pstate.pstate_change_ns = dml_get_wm_dram_clock_change(dml_core_ctx) * 1000;
|
||||
watermark->pte_meta_urgent_ns = dml_get_wm_memory_trip(dml_core_ctx) * 1000;
|
||||
watermark->frac_urg_bw_nom = dml_get_fraction_of_urgent_bandwidth(dml_core_ctx) * 1000;
|
||||
watermark->frac_urg_bw_flip = dml_get_fraction_of_urgent_bandwidth_imm_flip(dml_core_ctx) * 1000;
|
||||
watermark->urgent_latency_ns = dml_get_urgent_latency(dml_core_ctx) * 1000;
|
||||
watermark->cstate_pstate.fclk_pstate_change_ns = dml_get_wm_fclk_change(dml_core_ctx) * 1000;
|
||||
watermark->usr_retraining_ns = dml_get_wm_usr_retraining(dml_core_ctx) * 1000;
|
||||
}
|
||||
|
||||
void dml2_initialize_det_scratch(struct dml2_context *in_ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_PLANES; i++) {
|
||||
in_ctx->det_helper_scratch.dpps_per_surface[i] = 1;
|
||||
}
|
||||
}
|
||||
|
||||
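/*
 * Surfaces are expected to be ordered so that planes sharing a stream are adjacent; a
 * change in BlendingAndTiming between neighbours starts a new stream. For example,
 * BlendingAndTiming = {0, 0, 1} yields two streams with 2 and 1 planes respectively.
 */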
static unsigned int find_planes_per_stream_and_stream_count(struct dml2_context *in_ctx, struct dml_display_cfg_st *dml_dispcfg, int *num_of_planes_per_stream)
|
||||
{
|
||||
unsigned int plane_index, stream_index = 0, num_of_streams;
|
||||
|
||||
for (plane_index = 0; plane_index < dml_dispcfg->num_surfaces; plane_index++) {
|
||||
/* Number of planes per stream */
|
||||
num_of_planes_per_stream[stream_index] += 1;
|
||||
|
||||
if (plane_index + 1 < dml_dispcfg->num_surfaces && dml_dispcfg->plane.BlendingAndTiming[plane_index] != dml_dispcfg->plane.BlendingAndTiming[plane_index + 1])
|
||||
stream_index++;
|
||||
}
|
||||
|
||||
num_of_streams = stream_index + 1;
|
||||
|
||||
return num_of_streams;
|
||||
}
|
||||
|
||||
void dml2_apply_det_buffer_allocation_policy(struct dml2_context *in_ctx, struct dml_display_cfg_st *dml_dispcfg)
|
||||
{
|
||||
unsigned int num_of_streams = 0, plane_index = 0, max_det_size, stream_index = 0;
|
||||
int num_of_planes_per_stream[__DML_NUM_PLANES__] = { 0 };
|
||||
|
||||
max_det_size = in_ctx->config.det_segment_size * in_ctx->config.max_segments_per_hubp;
|
||||
|
||||
num_of_streams = find_planes_per_stream_and_stream_count(in_ctx, dml_dispcfg, num_of_planes_per_stream);
|
||||
|
||||
for (plane_index = 0; plane_index < dml_dispcfg->num_surfaces; plane_index++) {
|
||||
|
||||
dml_dispcfg->plane.DETSizeOverride[plane_index] = ((max_det_size / num_of_streams) / num_of_planes_per_stream[stream_index] / in_ctx->det_helper_scratch.dpps_per_surface[plane_index]);
|
||||
|
||||
/* If the override size is not divisible by det_segment_size then round it down to the
 * nearest multiple of det_segment_size, as this is a requirement.
 */
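/* Illustration with hypothetical numbers: for det_segment_size = 64 and a computed
 * per-plane share of 1365, the mask below yields 1365 & ~0x3F = 1344, a multiple of 64.
 */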
|
||||
if (dml_dispcfg->plane.DETSizeOverride[plane_index] % in_ctx->config.det_segment_size != 0) {
|
||||
dml_dispcfg->plane.DETSizeOverride[plane_index] = dml_dispcfg->plane.DETSizeOverride[plane_index] & ~0x3F;
|
||||
}
|
||||
|
||||
if (plane_index + 1 < dml_dispcfg->num_surfaces && dml_dispcfg->plane.BlendingAndTiming[plane_index] != dml_dispcfg->plane.BlendingAndTiming[plane_index + 1])
|
||||
stream_index++;
|
||||
}
|
||||
}
|
||||
|
||||
bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc_state *display_state, struct dml2_helper_det_policy_scratch *det_scratch)
|
||||
{
|
||||
unsigned int i = 0, dml_pipe_idx = 0, plane_id = 0;
|
||||
unsigned int max_det_size, total_det_allocated = 0;
|
||||
bool need_recalculation = false;
|
||||
|
||||
max_det_size = in_ctx->config.det_segment_size * in_ctx->config.max_segments_per_hubp;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
if (!display_state->res_ctx.pipe_ctx[i].stream)
|
||||
continue;
|
||||
if (get_plane_id(display_state, display_state->res_ctx.pipe_ctx[i].plane_state, &plane_id))
|
||||
dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id);
|
||||
else
|
||||
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id);
|
||||
total_det_allocated += dml_get_det_buffer_size_kbytes(&in_ctx->v20.dml_core_ctx, dml_pipe_idx);
|
||||
if (total_det_allocated > max_det_size) {
|
||||
need_recalculation = true;
|
||||
}
|
||||
}
|
||||
|
||||
/* Store the DPPPerSurface for correctly determining the number of planes in the next call. */
|
||||
for (i = 0; i < MAX_PLANES; i++) {
|
||||
det_scratch->dpps_per_surface[i] = in_ctx->v20.scratch.cur_display_config.hw.DPPPerSurface[i];
|
||||
}
|
||||
|
||||
return need_recalculation;
|
||||
}
|
143
drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
Normal file
|
@ -0,0 +1,143 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _DML2_UTILS_H_
|
||||
#define _DML2_UTILS_H_
|
||||
|
||||
#include "os_types.h"
|
||||
#include "dml2_dc_types.h"
|
||||
|
||||
struct dc;
|
||||
struct dml_timing_cfg_st;
|
||||
struct dml2_dcn_clocks;
|
||||
struct dc_state;
|
||||
|
||||
void dml2_util_copy_dml_timing(struct dml_timing_cfg_st *dml_timing_array, unsigned int dst_index, unsigned int src_index);
|
||||
void dml2_util_copy_dml_plane(struct dml_plane_cfg_st *dml_plane_array, unsigned int dst_index, unsigned int src_index);
|
||||
void dml2_util_copy_dml_surface(struct dml_surface_cfg_st *dml_surface_array, unsigned int dst_index, unsigned int src_index);
|
||||
void dml2_util_copy_dml_output(struct dml_output_cfg_st *dml_output_array, unsigned int dst_index, unsigned int src_index);
|
||||
unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, enum dml_output_encoder_class encoder, bool dsc_enabled);
|
||||
void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_state *context);
|
||||
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx);
|
||||
int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
|
||||
bool is_dtbclk_required(const struct dc *dc, struct dc_state *context);
|
||||
|
||||
/*
|
||||
* dml2_dc_construct_pipes - This function will determine if we need additional pipes based
|
||||
* on the DML calculated outputs for MPC, ODM and allocate them as necessary. This function
|
||||
* could be called from dml_validate_build_resource after dml_mode_programming, for example:
|
||||
* {
|
||||
* ...
|
||||
* map_hw_resources(&s->cur_display_config, &s->mode_support_info);
|
||||
* result = dml_mode_programming(&in_ctx->dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
|
||||
* dml2_dc_construct_pipes(in_display_state, s->mode_support_info, out_hw_context);
|
||||
* ...
|
||||
* }
|
||||
*
|
||||
* @context: To obtain res_ctx and read other information like stream ID etc.
|
||||
* @dml_mode_support_st : To get the ODM, MPC outputs as determined by the DML.
|
||||
* @out_hw_context : Handle to the new hardware context.
|
||||
*
|
||||
*
|
||||
* Return: None.
|
||||
*/
|
||||
void dml2_dc_construct_pipes(struct dc_state *context, struct dml_mode_support_info_st *dml_mode_support_st,
|
||||
struct resource_context *out_hw_context);
|
||||
|
||||
/*
|
||||
* dml2_predict_pipe_split - This function is the dml2 version of predict_pipe_split. It predicts
* whether a pipe split is required and returns the result as a bool.
|
||||
* @context : dc_state.
|
||||
* @pipe : old_index is the index of the pipe as derived from pipe_idx.
|
||||
* @index : index of the pipe
|
||||
*
|
||||
*
|
||||
* Return: true if a pipe split is required, false otherwise.
|
||||
*/
|
||||
bool dml2_predict_pipe_split(struct dc_state *context, display_pipe_params_st pipe, int index);
|
||||
|
||||
/*
|
||||
* dml2_build_mapped_resource - This function is the dml2 version of build_mapped_resource.
|
||||
* In case of ODM, we need to build pipe hardware params again as done in dcn20_build_mapped_resource.
|
||||
* @dc : struct dc
|
||||
* @context : struct dc_state.
|
||||
* @stream : stream whose corresponding pipe params need to be modified.
|
||||
*
|
||||
*
|
||||
* Return: Returns DC_OK if successful.
|
||||
*/
|
||||
enum dc_status dml2_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
|
||||
|
||||
/*
|
||||
* dml2_extract_rq_regs - This function will extract information needed for struct _vcs_dpi_display_rq_regs_st
|
||||
* and populate it.
|
||||
* @context: To obtain and populate the res_ctx->pipe_ctx->rq_regs with DML outputs.
|
||||
* @support : This structure has the DML intermediate outputs required to populate rq_regs.
|
||||
*
|
||||
*
|
||||
* Return: None.
|
||||
*/
|
||||
|
||||
/*
|
||||
* dml2_calculate_rq_and_dlg_params - This function will call into DML2 functions needed
|
||||
* for populating rq, ttu and dlg param structures and populate it.
|
||||
* @dc : struct dc
|
||||
* @context : dc_state provides a handle to selectively populate pipe_ctx
|
||||
* @out_new_hw_state: To obtain and populate the rq, dlg and ttu regs in
|
||||
* out_new_hw_state->pipe_ctx with DML outputs.
|
||||
* @in_ctx : This structure has the pointer to display_mode_lib_st.
|
||||
* @pipe_cnt : DML functions to obtain RQ, TTU and DLG params need a pipe_index.
|
||||
* This helps provide pipe_index in the pipe_cnt loop.
|
||||
*
|
||||
*
|
||||
* Return: None.
|
||||
*/
|
||||
void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt);
|
||||
|
||||
/*
|
||||
* dml2_apply_det_buffer_allocation_policy - This function will determine the DET Buffer size
|
||||
* override for each plane in the display configuration.
|
||||
* @dml2 : Handle for dml2 context
|
||||
* @dml_dispcfg : dml_dispcfg is the DML2 struct representing the current display config
|
||||
* Return : None.
|
||||
*/
|
||||
void dml2_apply_det_buffer_allocation_policy(struct dml2_context *in_ctx, struct dml_display_cfg_st *dml_dispcfg);
|
||||
|
||||
/*
|
||||
* dml2_verify_det_buffer_configuration - This function will verify if the allocated DET buffer exceeds
|
||||
* the total available DET size and outputs a boolean to indicate if recalculation is needed.
|
||||
* @dml2 : Handle for dml2 context
|
||||
* @display_state : Handle to the dc_state to verify against
* @det_scratch : Pointer to the DET helper scratch
|
||||
* Return : returns true if recalculation is required, false otherwise.
|
||||
*/
|
||||
bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc_state *display_state, struct dml2_helper_det_policy_scratch *det_scratch);
|
||||
|
||||
/*
|
||||
* dml2_initialize_det_scratch - This function will initialize the DET scratch space as per requirements.
|
||||
* @dml2 : Handle for dml2 context
|
||||
* Return : None
|
||||
*/
|
||||
void dml2_initialize_det_scratch(struct dml2_context *in_ctx);
|
||||
#endif
|
730
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
Normal file
|
@ -0,0 +1,730 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "display_mode_core.h"
|
||||
#include "dml2_internal_types.h"
|
||||
#include "dml2_utils.h"
|
||||
#include "dml2_policy.h"
|
||||
#include "dml2_translation_helper.h"
|
||||
#include "dml2_mall_phantom.h"
|
||||
#include "dml2_dc_resource_mgmt.h"
|
||||
|
||||
|
||||
static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
|
||||
{
|
||||
if (dml2->config.use_native_soc_bb_construction)
|
||||
dml2_init_ip_params(dml2, in_dc, out);
|
||||
else
|
||||
dml2_translate_ip_params(in_dc, out);
|
||||
}
|
||||
|
||||
static void initialize_dml2_soc_bbox(struct dml2_context *dml2, const struct dc *in_dc, struct soc_bounding_box_st *out)
|
||||
{
|
||||
if (dml2->config.use_native_soc_bb_construction)
|
||||
dml2_init_socbb_params(dml2, in_dc, out);
|
||||
else
|
||||
dml2_translate_socbb_params(in_dc, out);
|
||||
}
|
||||
|
||||
static void initialize_dml2_soc_states(struct dml2_context *dml2,
|
||||
const struct dc *in_dc, const struct soc_bounding_box_st *in_bbox, struct soc_states_st *out)
|
||||
{
|
||||
if (dml2->config.use_native_soc_bb_construction)
|
||||
dml2_init_soc_states(dml2, in_dc, in_bbox, out);
|
||||
else
|
||||
dml2_translate_soc_states(in_dc, out, in_dc->dml.soc.num_states);
|
||||
}
|
||||
|
||||
static void map_hw_resources(struct dml2_context *dml2,
|
||||
struct dml_display_cfg_st *in_out_display_cfg, struct dml_mode_support_info_st *mode_support_info)
|
||||
{
|
||||
unsigned int num_pipes = 0;
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < __DML_NUM_PLANES__; i++) {
|
||||
in_out_display_cfg->hw.ODMMode[i] = mode_support_info->ODMMode[i];
|
||||
in_out_display_cfg->hw.DPPPerSurface[i] = mode_support_info->DPPPerSurface[i];
|
||||
in_out_display_cfg->hw.DSCEnabled[i] = mode_support_info->DSCEnabled[i];
|
||||
in_out_display_cfg->hw.NumberOfDSCSlices[i] = mode_support_info->NumberOfDSCSlices[i];
|
||||
in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
|
||||
|
||||
for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
|
||||
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
|
||||
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
|
||||
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
|
||||
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id_valid[num_pipes] = true;
|
||||
num_pipes++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
|
||||
const struct dml_display_cfg_st *display_cfg,
|
||||
struct dml_mode_support_info_st *evaluation_info)
|
||||
{
|
||||
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
|
||||
|
||||
s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
|
||||
s->mode_support_params.in_display_cfg = display_cfg;
|
||||
s->mode_support_params.out_evaluation_info = evaluation_info;
|
||||
|
||||
memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
|
||||
s->mode_support_params.out_lowest_state_idx = 0;
|
||||
|
||||
return dml_mode_support_ex(&s->mode_support_params);
|
||||
}
|
||||
|
||||
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
|
||||
{
|
||||
int unused_dpps = p->ip_params->max_num_dpp;
|
||||
int i, j;
|
||||
int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count;
|
||||
int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index;
|
||||
float frame_time_sec, max_frame_time_sec;
|
||||
int largest_blend_and_timing = 0;
|
||||
bool optimization_done = false;
|
||||
|
||||
for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
|
||||
if (p->cur_display_config->plane.BlendingAndTiming[i] > largest_blend_and_timing)
|
||||
largest_blend_and_timing = p->cur_display_config->plane.BlendingAndTiming[i];
|
||||
}
|
||||
|
||||
if (p->new_policy != p->cur_policy)
|
||||
*p->new_policy = *p->cur_policy;
|
||||
|
||||
if (p->new_display_config != p->cur_display_config)
|
||||
*p->new_display_config = *p->cur_display_config;
|
||||
|
||||
// Optimize P-State Support
|
||||
if (dml2->config.use_native_pstate_optimization) {
|
||||
if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) {
|
||||
// Find the non-SubVP display with a refresh rate below 120Hz and the longest frame time
|
||||
subvp_timing_to_add = -1;
|
||||
subvp_surface_to_add = -1;
|
||||
max_frame_time_sec = 0;
|
||||
surface_count = 0;
|
||||
for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
|
||||
refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000,
|
||||
(p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i]));
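/*
 * PixelClock is in MHz, so this is pixel_clock_hz / (htotal * vtotal); e.g. 594 MHz
 * with a 4400 x 2250 total raster gives 594000000 / 9900000 = 60 Hz.
 */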
|
||||
if (refresh_rate_hz < 120) {
|
||||
// Check its upstream surfaces to see if this one could be converted to subvp.
|
||||
dpps_needed = 0;
|
||||
for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) {
|
||||
if (p->cur_display_config->plane.BlendingAndTiming[j] == i &&
|
||||
p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) {
|
||||
dpps_needed += p->cur_mode_support_info->DPPPerSurface[j];
|
||||
subvp_surface_to_add = j;
|
||||
surface_count++;
|
||||
}
|
||||
}
|
||||
|
||||
if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) {
|
||||
frame_time_sec = (float)1 / refresh_rate_hz;
|
||||
if (frame_time_sec > max_frame_time_sec) {
|
||||
max_frame_time_sec = frame_time_sec;
|
||||
subvp_timing_to_add = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (subvp_timing_to_add >= 0) {
|
||||
new_timing_index = p->new_display_config->num_timings++;
|
||||
new_surface_index = p->new_display_config->num_surfaces++;
|
||||
// Add a phantom pipe reflecting the main pipe's timing
|
||||
dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add);
|
||||
|
||||
pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us +
|
||||
p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
|
||||
(p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) /
|
||||
(double)p->new_display_config->timing.HTotal[subvp_timing_to_add]);
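/*
 * Converts the firmware processing delay plus the p-state allow width from
 * microseconds into scan lines at the stream's line rate; e.g. a hypothetical
 * 130 us at 594 MHz with an h_total of 4400 is 0.00013 * 594000000 / 4400 ~= 17 lines.
 */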
|
||||
|
||||
subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines;
|
||||
|
||||
p->new_display_config->timing.VActive[new_timing_index] = subvp_height;
|
||||
p->new_display_config->timing.VTotal[new_timing_index] = subvp_height +
|
||||
p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add];
|
||||
|
||||
p->new_display_config->output.OutputDisabled[new_timing_index] = true;
|
||||
|
||||
p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport;
|
||||
|
||||
dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add);
|
||||
dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add);
|
||||
|
||||
p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height;
|
||||
p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height;
|
||||
p->new_display_config->plane.ViewportStationary[new_surface_index] = false;
|
||||
|
||||
p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable;
|
||||
p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe;
|
||||
|
||||
p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0;
|
||||
|
||||
p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required;
|
||||
|
||||
p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index;
|
||||
|
||||
optimization_done = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Optimize Clocks
|
||||
if (!optimization_done) {
|
||||
if (largest_blend_and_timing == 0 && p->cur_policy->ODMUse[0] == dml_odm_use_policy_combine_as_needed && dml2->config.minimize_dispclk_using_odm) {
|
||||
odms_needed = dml2_util_get_maximum_odm_combine_for_output(dml2->config.optimize_odm_4to1,
|
||||
p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1;
|
||||
|
||||
if (odms_needed <= unused_dpps) {
|
||||
unused_dpps -= odms_needed;
|
||||
|
||||
if (odms_needed == 1) {
|
||||
p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1;
|
||||
optimization_done = true;
|
||||
} else if (odms_needed == 3) {
|
||||
p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_4to1;
|
||||
optimization_done = true;
|
||||
} else
|
||||
optimization_done = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return optimization_done;
|
||||
}
|
||||
|
||||
static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state)
|
||||
{
|
||||
struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
|
||||
struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;
|
||||
|
||||
unsigned int dml_result = 0;
|
||||
int result = -1, i, j;
|
||||
|
||||
build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);
|
||||
|
||||
/* Zero out before each call before proceeding */
|
||||
memset(s, 0, sizeof(struct dml2_calculate_lowest_supported_state_for_temp_read_scratch));
|
||||
memset(&s_global->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
|
||||
memset(&s_global->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
|
||||
|
||||
for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
|
||||
/* Calling resource_build_scaling_params will populate the pipe params
|
||||
* with the necessary information needed for correct DML calculations
|
||||
* This is also done in DML1 driver code path and hence display_state
|
||||
* cannot be const.
|
||||
*/
|
||||
struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (pipe->plane_state) {
|
||||
if (!dml2->config.callbacks.build_scaling_params(pipe)) {
|
||||
ASSERT(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);
|
||||
|
||||
for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
|
||||
s->uclk_change_latencies[i] = dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us;
|
||||
}
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
for (j = 0; j < dml2->v20.dml_core_ctx.states.num_states; j++) {
|
||||
dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
|
||||
}
|
||||
|
||||
dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info);
|
||||
|
||||
if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
|
||||
map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
|
||||
dml_result = dml_mode_programming(&dml2->v20.dml_core_ctx, s_global->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
|
||||
|
||||
ASSERT(dml_result);
|
||||
|
||||
dml2_extract_watermark_set(&dml2->v20.g6_temp_read_watermark_set, &dml2->v20.dml_core_ctx);
|
||||
dml2->v20.g6_temp_read_watermark_set.cstate_pstate.fclk_pstate_change_ns = dml2->v20.g6_temp_read_watermark_set.cstate_pstate.pstate_change_ns;
|
||||
|
||||
result = s_global->mode_support_params.out_lowest_state_idx;
|
||||
|
||||
while (dml2->v20.dml_core_ctx.states.state_array[result].dram_speed_mts < s_global->dummy_pstate_table[i].dram_speed_mts)
|
||||
result++;
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
|
||||
dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us = s->uclk_change_latencies[i];
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static void copy_dummy_pstate_table(struct dummy_pstate_entry *dest, struct dummy_pstate_entry *src, unsigned int num_entries)
|
||||
{
|
||||
for (int i = 0; i < num_entries; i++) {
|
||||
dest[i] = src[i];
|
||||
}
|
||||
}
|
||||
|
||||
static bool are_timings_requiring_odm_doing_blending(const struct dml_display_cfg_st *display_cfg,
|
||||
const struct dml_mode_support_info_st *evaluation_info)
|
||||
{
|
||||
unsigned int planes_per_timing[__DML_NUM_PLANES__] = {0};
|
||||
int i;
|
||||
|
||||
for (i = 0; i < display_cfg->num_surfaces; i++)
|
||||
planes_per_timing[display_cfg->plane.BlendingAndTiming[i]]++;
|
||||
|
||||
for (i = 0; i < __DML_NUM_PLANES__; i++) {
|
||||
if (planes_per_timing[i] > 1 && evaluation_info->ODMMode[i] != dml_odm_mode_bypass)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const struct dml_display_cfg_st *display_cfg,
|
||||
const struct dml_mode_support_info_st *evaluation_info)
|
||||
{
|
||||
bool pass = true;
|
||||
|
||||
if (!ctx->config.enable_windowed_mpo_odm) {
|
||||
if (are_timings_requiring_odm_doing_blending(display_cfg, evaluation_info))
|
||||
pass = false;
|
||||
}
|
||||
|
||||
return pass;
|
||||
}
|
||||
|
||||
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
|
||||
struct dc_state *display_state)
|
||||
{
|
||||
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
|
||||
unsigned int result = 0, i;
|
||||
unsigned int optimized_result = true;
|
||||
|
||||
build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);
|
||||
|
||||
/* Zero out before each call before proceeding */
|
||||
memset(&s->cur_display_config, 0, sizeof(struct dml_display_cfg_st));
|
||||
memset(&s->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
|
||||
memset(&s->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
|
||||
memset(&s->optimize_configuration_params, 0, sizeof(struct dml2_wrapper_optimize_configuration_params));
|
||||
|
||||
for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
|
||||
/* Calling resource_build_scaling_params will populate the pipe params
|
||||
* with the necessary information needed for correct DML calculations
|
||||
* This is also done in DML1 driver code path and hence display_state
|
||||
* cannot be const.
|
||||
*/
|
||||
struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (pipe->plane_state) {
|
||||
if (!dml2->config.callbacks.build_scaling_params(pipe)) {
|
||||
ASSERT(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);
|
||||
if (!dml2->config.skip_hw_state_mapping)
|
||||
dml2_apply_det_buffer_allocation_policy(dml2, &s->cur_display_config);
|
||||
|
||||
result = pack_and_call_dml_mode_support_ex(dml2,
|
||||
&s->cur_display_config,
|
||||
&s->mode_support_info);
|
||||
|
||||
if (result)
|
||||
result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);
|
||||
|
||||
// Try to optimize
|
||||
if (result) {
|
||||
s->cur_policy = dml2->v20.dml_core_ctx.policy;
|
||||
s->optimize_configuration_params.dml_core_ctx = &dml2->v20.dml_core_ctx;
|
||||
s->optimize_configuration_params.config = &dml2->config;
|
||||
s->optimize_configuration_params.ip_params = &dml2->v20.dml_core_ctx.ip;
|
||||
s->optimize_configuration_params.cur_display_config = &s->cur_display_config;
|
||||
s->optimize_configuration_params.cur_mode_support_info = &s->mode_support_info;
|
||||
s->optimize_configuration_params.cur_policy = &s->cur_policy;
|
||||
s->optimize_configuration_params.new_display_config = &s->new_display_config;
|
||||
s->optimize_configuration_params.new_policy = &s->new_policy;
|
||||
|
||||
while (optimized_result && optimize_configuration(dml2, &s->optimize_configuration_params)) {
|
||||
dml2->v20.dml_core_ctx.policy = s->new_policy;
|
||||
optimized_result = pack_and_call_dml_mode_support_ex(dml2,
|
||||
&s->new_display_config,
|
||||
&s->mode_support_info);
|
||||
|
||||
if (optimized_result)
|
||||
optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);
|
||||
|
||||
// If the new optimized state is supported, then set current = new
|
||||
if (optimized_result) {
|
||||
s->cur_display_config = s->new_display_config;
|
||||
s->cur_policy = s->new_policy;
|
||||
} else {
|
||||
// Else, restore policy to current
|
||||
dml2->v20.dml_core_ctx.policy = s->cur_policy;
|
||||
}
|
||||
}
|
||||
|
||||
// Optimize ended with a failed config, so we need to restore DML state to last passing
|
||||
if (!optimized_result) {
|
||||
result = pack_and_call_dml_mode_support_ex(dml2,
|
||||
&s->cur_display_config,
|
||||
&s->mode_support_info);
|
||||
}
|
||||
}
|
||||
|
||||
if (result)
|
||||
map_hw_resources(dml2, &s->cur_display_config, &s->mode_support_info);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static int find_drr_eligible_stream(struct dc_state *display_state)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < display_state->stream_count; i++) {
|
||||
if (display_state->streams[i]->mall_stream_config.type == SUBVP_NONE
|
||||
&& display_state->streams[i]->ignore_msa_timing_param) {
|
||||
// Use ignore_msa_timing_param flag to identify as DRR
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state)
|
||||
{
|
||||
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
|
||||
bool pstate_optimization_done = false;
|
||||
bool pstate_optimization_success = false;
|
||||
bool result = false;
|
||||
int drr_display_index = 0, non_svp_streams = 0;
|
||||
bool force_svp = dml2->config.svp_pstate.force_enable_subvp;
|
||||
bool advanced_pstate_switching = false;
|
||||
|
||||
display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
|
||||
display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
|
||||
|
||||
result = dml_mode_support_wrapper(dml2, display_state);
|
||||
|
||||
if (!result) {
|
||||
pstate_optimization_done = true;
|
||||
} else if (!advanced_pstate_switching ||
|
||||
(s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp)) {
|
||||
pstate_optimization_success = true;
|
||||
pstate_optimization_done = true;
|
||||
}
|
||||
|
||||
if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) {
|
||||
display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
|
||||
|
||||
result = dml_mode_support_wrapper(dml2, display_state);
|
||||
} else {
|
||||
non_svp_streams = display_state->stream_count;
|
||||
|
||||
while (!pstate_optimization_done) {
|
||||
result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
|
||||
|
||||
// Always try adding SVP first
|
||||
if (result)
|
||||
result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info);
|
||||
else
|
||||
pstate_optimization_done = true;
|
||||
|
||||
|
||||
if (result) {
|
||||
result = dml_mode_support_wrapper(dml2, display_state);
|
||||
} else {
|
||||
pstate_optimization_done = true;
|
||||
}
|
||||
|
||||
if (result) {
|
||||
non_svp_streams--;
|
||||
|
||||
if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
|
||||
if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) {
|
||||
pstate_optimization_success = true;
|
||||
pstate_optimization_done = true;
|
||||
} else {
|
||||
pstate_optimization_success = false;
|
||||
pstate_optimization_done = false;
|
||||
}
|
||||
} else {
|
||||
drr_display_index = find_drr_eligible_stream(display_state);
|
||||
|
||||
// If there is only 1 remaining non SubVP pipe that is DRR, check static
|
||||
// schedulability for SubVP + DRR.
|
||||
if (non_svp_streams == 1 && drr_display_index >= 0) {
|
||||
if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) {
|
||||
display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true;
|
||||
display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index;
|
||||
result = dml_mode_support_wrapper(dml2, display_state);
|
||||
}
|
||||
|
||||
if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
|
||||
pstate_optimization_success = true;
|
||||
pstate_optimization_done = true;
|
||||
} else {
|
||||
pstate_optimization_success = false;
|
||||
pstate_optimization_done = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (pstate_optimization_success) {
|
||||
pstate_optimization_done = true;
|
||||
} else {
|
||||
pstate_optimization_done = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!pstate_optimization_success) {
|
||||
dml2_svp_remove_all_phantom_pipes(dml2, display_state);
|
||||
display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
|
||||
display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
|
||||
result = dml_mode_support_wrapper(dml2, display_state);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static bool call_dml_mode_support_and_programming(struct dc_state *context)
|
||||
{
|
||||
unsigned int result = 0;
|
||||
unsigned int min_state;
|
||||
int min_state_for_g6_temp_read = 0;
|
||||
struct dml2_context *dml2 = context->bw_ctx.dml2;
|
||||
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
|
||||
|
||||
min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
|
||||
|
||||
ASSERT(min_state_for_g6_temp_read >= 0);
|
||||
|
||||
if (!dml2->config.use_native_pstate_optimization) {
|
||||
result = optimize_pstate_with_svp_and_drr(dml2, context);
|
||||
} else {
|
||||
result = dml_mode_support_wrapper(dml2, context);
|
||||
}
|
||||
|
||||
/* When trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on.
|
||||
* Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
|
||||
*/
|
||||
if (min_state_for_g6_temp_read >= 0)
|
||||
min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
|
||||
else
|
||||
min_state = s->mode_support_params.out_lowest_state_idx;
|
||||
|
||||
if (result)
|
||||
result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context)
|
||||
{
|
||||
struct dml2_context *dml2 = context->bw_ctx.dml2;
|
||||
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
|
||||
struct dml2_dcn_clocks out_clks;
|
||||
unsigned int result = 0;
|
||||
bool need_recalculation = false;
|
||||
|
||||
if (!context || context->stream_count == 0)
|
||||
return true;
|
||||
|
||||
/* Zero out before each call before proceeding */
|
||||
memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
|
||||
memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
|
||||
memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
|
||||
memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));
|
||||
|
||||
/* Initialize DET scratch */
|
||||
dml2_initialize_det_scratch(dml2);
|
||||
|
||||
copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);
|
||||
|
||||
result = call_dml_mode_support_and_programming(context);
|
||||
/* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation
|
||||
* is required or not, the resource context needs to correctly reflect the number of active pipes. We would
|
||||
* only know the correct number of active pipes after dml2_map_dc_pipes is called.
|
||||
*/
|
||||
if (result && !dml2->config.skip_hw_state_mapping)
|
||||
dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
|
||||
|
||||
/* Verify and update DET Buffer configuration if needed. dml2_verify_det_buffer_configuration will check if DET Buffer
|
||||
* size needs to be updated. If yes it will update the DETOverride variable and set need_recalculation flag to true.
|
||||
* Based on that flag, run mode support again. Verification needs to be run after dml_mode_programming because the getters
|
||||
* return correct det buffer values only after dml_mode_programming is called.
|
||||
*/
|
||||
if (result && !dml2->config.skip_hw_state_mapping) {
|
||||
need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
|
||||
if (need_recalculation) {
|
||||
/* Engage the DML again if recalculation is required. */
|
||||
call_dml_mode_support_and_programming(context);
|
||||
if (!dml2->config.skip_hw_state_mapping) {
|
||||
dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
|
||||
}
|
||||
need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
|
||||
ASSERT(need_recalculation == false);
|
||||
}
|
||||
}
|
||||
|
||||
if (result) {
|
||||
unsigned int lowest_state_idx = s->mode_support_params.out_lowest_state_idx;
|
||||
out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.mp.Dispclk_calculated * 1000;
|
||||
out_clks.p_state_supported = s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported;
|
||||
if (in_dc->config.use_default_clock_table &&
|
||||
(lowest_state_idx < dml2->v20.dml_core_ctx.states.num_states - 1)) {
|
||||
lowest_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
|
||||
out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dispclk_mhz * 1000;
|
||||
}
|
||||
|
||||
out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000;
|
||||
out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000;
|
||||
out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts;
|
||||
out_clks.phyclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].phyclk_mhz * 1000;
|
||||
out_clks.socclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].socclk_mhz * 1000;
|
||||
out_clks.ref_dtbclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dtbclk_mhz * 1000;
|
||||
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(in_dc, context);
|
||||
|
||||
if (!dml2->config.skip_hw_state_mapping) {
|
||||
/* Call dml2_calculate_rq_and_dlg_params */
|
||||
dml2_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml2, in_dc->res_pool->pipe_count);
|
||||
}
|
||||
|
||||
dml2_copy_clocks_to_dc_state(&out_clks, context);
|
||||
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
|
||||
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
|
||||
memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
|
||||
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static bool dml2_validate_only(const struct dc_state *context)
|
||||
{
|
||||
struct dml2_context *dml2 = context->bw_ctx.dml2;
|
||||
unsigned int result = 0;
|
||||
|
||||
if (!context || context->stream_count == 0)
|
||||
return true;
|
||||
|
||||
/* Zero out before each call before proceeding */
|
||||
memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
|
||||
memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
|
||||
memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
|
||||
memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));
|
||||
|
||||
build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);
|
||||
|
||||
map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);
|
||||
|
||||
result = pack_and_call_dml_mode_support_ex(dml2,
|
||||
&dml2->v20.scratch.cur_display_config,
|
||||
&dml2->v20.scratch.mode_support_info);
|
||||
|
||||
return (result == 1) ? true : false;
|
||||
}
|
||||
|
||||
static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
|
||||
{
|
||||
if (dc->debug.override_odm_optimization) {
|
||||
dml2->config.minimize_dispclk_using_odm = dc->debug.minimize_dispclk_using_odm;
|
||||
}
|
||||
}
|
||||
|
||||
bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate)
|
||||
{
|
||||
bool out = false;
|
||||
|
||||
if (!(context->bw_ctx.dml2))
|
||||
return false;
|
||||
dml2_apply_debug_options(in_dc, context->bw_ctx.dml2);
|
||||
|
||||
|
||||
/* Use dml2_validate_only for the fast_validate path */
|
||||
if (fast_validate)
|
||||
out = dml2_validate_only(context);
|
||||
else
|
||||
out = dml2_validate_and_build_resource(in_dc, context);
|
||||
return out;
|
||||
}
|
||||
|
||||
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
|
||||
{
|
||||
// Allocate Mode Lib Ctx
|
||||
*dml2 = (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
|
||||
|
||||
if (!(*dml2))
|
||||
return false;
|
||||
|
||||
// Store config options
|
||||
(*dml2)->config = *config;
|
||||
|
||||
switch (in_dc->ctx->dce_version) {
|
||||
case DCN_VERSION_3_2:
|
||||
(*dml2)->v20.dml_core_ctx.project = dml_project_dcn32;
|
||||
break;
|
||||
case DCN_VERSION_3_21:
|
||||
(*dml2)->v20.dml_core_ctx.project = dml_project_dcn321;
|
||||
break;
|
||||
default:
|
||||
(*dml2)->v20.dml_core_ctx.project = dml_project_default;
|
||||
break;
|
||||
}
|
||||
|
||||
initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);
|
||||
|
||||
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
|
||||
|
||||
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void dml2_destroy(struct dml2_context *dml2)
|
||||
{
|
||||
if (!dml2)
|
||||
return;
|
||||
|
||||
kfree(dml2);
|
||||
}
|
||||
|
||||
void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
|
||||
unsigned int *fclk_change_support, unsigned int *dram_clk_change_support)
|
||||
{
|
||||
*fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0];
|
||||
*dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0];
|
||||
}
|
210
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
Normal file
210
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
Normal file
|
@ -0,0 +1,210 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _DML2_WRAPPER_H_
|
||||
#define _DML2_WRAPPER_H_
|
||||
|
||||
#include "os_types.h"
|
||||
|
||||
#define DML2_MAX_NUM_DPM_LVL 30
|
||||
|
||||
struct dml2_context;
|
||||
struct display_mode_lib_st;
|
||||
struct dc;
|
||||
struct pipe_ctx;
|
||||
struct dc_plane_state;
|
||||
struct dc_sink;
|
||||
struct dc_stream_state;
|
||||
struct resource_context;
|
||||
struct display_stream_compressor;
|
||||
|
||||
// Configuration of the MALL on the SoC
|
||||
struct dml2_soc_mall_info {
|
||||
// Cache line size of 0 means MALL is not enabled/present
|
||||
unsigned int cache_line_size_bytes;
|
||||
unsigned int cache_num_ways;
|
||||
unsigned int max_cab_allocation_bytes;
|
||||
|
||||
unsigned int mblk_width_pixels;
|
||||
unsigned int mblk_size_bytes;
|
||||
unsigned int mblk_height_4bpe_pixels;
|
||||
unsigned int mblk_height_8bpe_pixels;
|
||||
};
|
||||
|
||||
// Output of DML2 for clock requirements
|
||||
struct dml2_dcn_clocks {
|
||||
unsigned int dispclk_khz;
|
||||
unsigned int dcfclk_khz;
|
||||
unsigned int fclk_khz;
|
||||
unsigned int uclk_mts;
|
||||
unsigned int phyclk_khz;
|
||||
unsigned int socclk_khz;
|
||||
unsigned int ref_dtbclk_khz;
|
||||
bool p_state_supported;
|
||||
unsigned int cab_num_ways_required;
|
||||
};
|
||||
|
||||
struct dml2_dc_callbacks {
|
||||
struct dc *dc;
|
||||
bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx);
|
||||
bool (*can_support_mclk_switch_using_fw_based_vblank_stretch)(struct dc *dc, struct dc_state *context);
|
||||
bool (*acquire_secondary_pipe_for_mpc_odm)(const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm);
|
||||
};
|
||||
|
||||
struct dml2_dc_svp_callbacks {
|
||||
struct dc *dc;
|
||||
bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx);
|
||||
struct dc_stream_state* (*create_stream_for_sink)(struct dc_sink *dc_sink_data);
|
||||
struct dc_plane_state* (*create_plane)(struct dc *dc);
|
||||
enum dc_status (*add_stream_to_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
|
||||
bool (*add_plane_to_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
|
||||
bool (*remove_plane_from_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
|
||||
enum dc_status (*remove_stream_from_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream);
|
||||
void (*plane_state_release)(struct dc_plane_state *plane_state);
|
||||
void (*stream_release)(struct dc_stream_state *stream);
|
||||
void (*release_dsc)(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc);
|
||||
};
|
||||
|
||||
struct dml2_clks_table_entry {
|
||||
unsigned int dcfclk_mhz;
|
||||
unsigned int fclk_mhz;
|
||||
unsigned int memclk_mhz;
|
||||
unsigned int socclk_mhz;
|
||||
unsigned int dtbclk_mhz;
|
||||
unsigned int dispclk_mhz;
|
||||
unsigned int dppclk_mhz;
|
||||
};
|
||||
|
||||
struct dml2_clks_num_entries {
|
||||
unsigned int num_dcfclk_levels;
|
||||
unsigned int num_fclk_levels;
|
||||
unsigned int num_memclk_levels;
|
||||
unsigned int num_socclk_levels;
|
||||
unsigned int num_dtbclk_levels;
|
||||
unsigned int num_dispclk_levels;
|
||||
unsigned int num_dppclk_levels;
|
||||
};
|
||||
|
||||
struct dml2_clks_limit_table {
|
||||
struct dml2_clks_table_entry clk_entries[DML2_MAX_NUM_DPM_LVL];
|
||||
struct dml2_clks_num_entries num_entries_per_clk;
|
||||
unsigned int num_states;
|
||||
};
|
||||
|
||||
// Various overrides, per ASIC or per SKU specific, or for debugging purpose when/if available
|
||||
struct dml2_soc_bbox_overrides {
|
||||
double xtalclk_mhz;
|
||||
double dchub_refclk_mhz;
|
||||
double dprefclk_mhz;
|
||||
double disp_pll_vco_speed_mhz;
|
||||
double urgent_latency_us;
|
||||
double sr_exit_latency_us;
|
||||
double sr_enter_plus_exit_latency_us;
|
||||
double dram_clock_change_latency_us;
|
||||
double fclk_change_latency_us;
|
||||
unsigned int dram_num_chan;
|
||||
unsigned int dram_chanel_width_bytes;
|
||||
struct dml2_clks_limit_table clks_table;
|
||||
};
|
||||
|
||||
struct dml2_configuration_options {
|
||||
int dcn_pipe_count;
|
||||
bool use_native_pstate_optimization;
|
||||
bool enable_windowed_mpo_odm;
|
||||
bool use_native_soc_bb_construction;
|
||||
bool skip_hw_state_mapping;
|
||||
bool optimize_odm_4to1;
|
||||
bool minimize_dispclk_using_odm;
|
||||
struct dml2_dc_callbacks callbacks;
|
||||
struct {
|
||||
bool force_disable_subvp;
|
||||
bool force_enable_subvp;
|
||||
unsigned int subvp_fw_processing_delay_us;
|
||||
unsigned int subvp_pstate_allow_width_us;
|
||||
unsigned int subvp_prefetch_end_to_mall_start_us;
|
||||
unsigned int subvp_swath_height_margin_lines;
|
||||
struct dml2_dc_svp_callbacks callbacks;
|
||||
} svp_pstate;
|
||||
struct dml2_soc_mall_info mall_cfg;
|
||||
struct dml2_soc_bbox_overrides bbox_overrides;
|
||||
unsigned int max_segments_per_hubp;
|
||||
unsigned int det_segment_size;
|
||||
};
|
||||
|
||||
/*
|
||||
* dml2_create - Creates dml2_context.
|
||||
* @in_dc: dc.
|
||||
* @config: dml2 configuration options.
|
||||
* @dml2: Created dml2 context.
|
||||
*
|
||||
* Create and destroy of DML2 is done as part of dc_state creation
|
||||
* and dc_state_free. DML2 IP, SOC and STATES are initialized at
|
||||
* creation time.
|
||||
*
|
||||
* Return: True if dml2 is successfully created, false otherwise.
|
||||
*/
|
||||
bool dml2_create(const struct dc *in_dc,
|
||||
const struct dml2_configuration_options *config,
|
||||
struct dml2_context **dml2);
|
||||
|
||||
void dml2_destroy(struct dml2_context *dml2);
|
||||
|
||||
/*
|
||||
* dml2_validate - Determines if a display configuration is supported or not.
|
||||
* @in_dc: dc.
|
||||
* @context: dc_state to be validated.
|
||||
* @fast_validate: Fast validate will not populate context.res_ctx.
|
||||
*
|
||||
* DML1.0 compatible interface for validation.
|
||||
*
|
||||
* Based on fast_validate option internally would call:
|
||||
*
|
||||
* -dml2_validate_and_build_resource - for non fast_validate option
|
||||
* Calculates if dc_state can be supported on the SOC, and attempts to
|
||||
* optimize the power management feature supports versus minimum clocks.
|
||||
* If supported, also builds out_new_hw_state to represent the hw programming
|
||||
* for the new dc state.
|
||||
*
|
||||
* -dml2_validate_only - for fast_validate option
|
||||
* Calculates if dc_state can be supported on the SOC (i.e. at maximum
|
||||
* clocks) with all mandatory power features enabled.
|
||||
|
||||
* Context: Two threads may not invoke this function concurrently unless they reference
|
||||
* separate dc_states for validation.
|
||||
* Return: True if mode is supported, false otherwise.
|
||||
*/
|
||||
bool dml2_validate(const struct dc *in_dc,
|
||||
struct dc_state *context,
|
||||
bool fast_validate);
|
||||
|
||||
/*
|
||||
* dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info.
|
||||
* @dml2: input dml2 context pointer.
|
||||
* @fclk_change_support: output pointer holding the fclk change support info (vactive, vblank, unsupported).
|
||||
* @dram_clk_change_support: output pointer holding the uclk change support info (vactive, vblank, unsupported).
|
||||
*/
|
||||
void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
|
||||
unsigned int *fclk_change_support, unsigned int *dram_clk_change_support);
|
||||
|
||||
#endif //_DML2_WRAPPER_H_
|
30
drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h
Normal file
30
drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h
Normal file
|
@ -0,0 +1,30 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DML_ASSERT_H__
|
||||
#define __DML_ASSERT_H__
|
||||
|
||||
#include "os_types.h"
|
||||
|
||||
#endif //__DML_ASSERT_H__
|
31
drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h
Normal file
31
drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h
Normal file
|
@ -0,0 +1,31 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
/* This header intentinally does not include an #ifdef guard as it only contains includes for other headers*/
|
||||
|
||||
/*
|
||||
* Standard Types
|
||||
*/
|
||||
#include "os_types.h"
|
||||
#include "cmntypes.h"
|
585
drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
Normal file
585
drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
Normal file
|
@ -0,0 +1,585 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "dml_display_rq_dlg_calc.h"
|
||||
#include "display_mode_core.h"
|
||||
#include "display_mode_util.h"
|
||||
|
||||
static dml_bool_t is_dual_plane(enum dml_source_format_class source_format)
|
||||
{
|
||||
dml_bool_t ret_val = 0;
|
||||
|
||||
if ((source_format == dml_420_12) || (source_format == dml_420_8) || (source_format == dml_420_10) || (source_format == dml_rgbe_alpha))
|
||||
ret_val = 1;
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
void dml_rq_dlg_get_rq_reg(dml_display_rq_regs_st *rq_regs,
|
||||
struct display_mode_lib_st *mode_lib,
|
||||
const dml_uint_t pipe_idx)
|
||||
{
|
||||
dml_uint_t plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
|
||||
enum dml_source_format_class source_format = mode_lib->ms.cache_display_cfg.surface.SourcePixelFormat[plane_idx];
|
||||
enum dml_swizzle_mode sw_mode = mode_lib->ms.cache_display_cfg.surface.SurfaceTiling[plane_idx];
|
||||
dml_bool_t dual_plane = is_dual_plane((enum dml_source_format_class)(source_format));
|
||||
|
||||
uint32 pixel_chunk_bytes = 0;
|
||||
uint32 min_pixel_chunk_bytes = 0;
|
||||
uint32 meta_chunk_bytes = 0;
|
||||
uint32 min_meta_chunk_bytes = 0;
|
||||
uint32 dpte_group_bytes = 0;
|
||||
uint32 mpte_group_bytes = 0;
|
||||
|
||||
uint32 p1_pixel_chunk_bytes = 0;
|
||||
uint32 p1_min_pixel_chunk_bytes = 0;
|
||||
uint32 p1_meta_chunk_bytes = 0;
|
||||
uint32 p1_min_meta_chunk_bytes = 0;
|
||||
uint32 p1_dpte_group_bytes = 0;
|
||||
uint32 p1_mpte_group_bytes = 0;
|
||||
|
||||
dml_uint_t detile_buf_size_in_bytes;
|
||||
dml_uint_t detile_buf_plane1_addr = 0;
|
||||
|
||||
dml_float_t stored_swath_l_bytes;
|
||||
dml_float_t stored_swath_c_bytes;
|
||||
dml_bool_t is_phantom_pipe;
|
||||
|
||||
dml_uint_t pte_row_height_linear;
|
||||
|
||||
dml_print("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
|
||||
|
||||
memset(rq_regs, 0, sizeof(*rq_regs));
|
||||
|
||||
pixel_chunk_bytes = (dml_uint_t)(dml_get_pixel_chunk_size_in_kbyte(mode_lib) * 1024);
|
||||
min_pixel_chunk_bytes = (dml_uint_t)(dml_get_min_pixel_chunk_size_in_byte(mode_lib));
|
||||
|
||||
if (pixel_chunk_bytes == 64 * 1024)
|
||||
min_pixel_chunk_bytes = 0;
|
||||
|
||||
meta_chunk_bytes = (dml_uint_t)(dml_get_meta_chunk_size_in_kbyte(mode_lib) * 1024);
|
||||
min_meta_chunk_bytes = (dml_uint_t)(dml_get_min_meta_chunk_size_in_byte(mode_lib));
|
||||
|
||||
dpte_group_bytes = (dml_uint_t)(dml_get_dpte_group_size_in_bytes(mode_lib, pipe_idx));
|
||||
mpte_group_bytes = (dml_uint_t)(dml_get_vm_group_size_in_bytes(mode_lib, pipe_idx));
|
||||
|
||||
p1_pixel_chunk_bytes = pixel_chunk_bytes;
|
||||
p1_min_pixel_chunk_bytes = min_pixel_chunk_bytes;
|
||||
p1_meta_chunk_bytes = meta_chunk_bytes;
|
||||
p1_min_meta_chunk_bytes = min_meta_chunk_bytes;
|
||||
p1_dpte_group_bytes = dpte_group_bytes;
|
||||
p1_mpte_group_bytes = mpte_group_bytes;
|
||||
|
||||
if (source_format == dml_rgbe_alpha)
|
||||
p1_pixel_chunk_bytes = (dml_uint_t)(dml_get_alpha_pixel_chunk_size_in_kbyte(mode_lib) * 1024);
|
||||
|
||||
rq_regs->rq_regs_l.chunk_size = (dml_uint_t)(dml_log2((dml_float_t) pixel_chunk_bytes) - 10);
|
||||
rq_regs->rq_regs_c.chunk_size = (dml_uint_t)(dml_log2((dml_float_t) p1_pixel_chunk_bytes) - 10);
|
||||
|
||||
if (min_pixel_chunk_bytes == 0)
|
||||
rq_regs->rq_regs_l.min_chunk_size = 0;
|
||||
else
|
||||
rq_regs->rq_regs_l.min_chunk_size = (dml_uint_t)(dml_log2((dml_float_t) min_pixel_chunk_bytes) - 8 + 1);
|
||||
|
||||
if (p1_min_pixel_chunk_bytes == 0)
|
||||
rq_regs->rq_regs_c.min_chunk_size = 0;
|
||||
else
|
||||
rq_regs->rq_regs_c.min_chunk_size = (dml_uint_t)(dml_log2((dml_float_t) p1_min_pixel_chunk_bytes) - 8 + 1);
|
||||
|
||||
rq_regs->rq_regs_l.meta_chunk_size = (dml_uint_t)(dml_log2((dml_float_t) meta_chunk_bytes) - 10);
|
||||
rq_regs->rq_regs_c.meta_chunk_size = (dml_uint_t)(dml_log2((dml_float_t) p1_meta_chunk_bytes) - 10);
|
||||
|
||||
if (min_meta_chunk_bytes == 0)
|
||||
rq_regs->rq_regs_l.min_meta_chunk_size = 0;
|
||||
else
|
||||
rq_regs->rq_regs_l.min_meta_chunk_size = (dml_uint_t)(dml_log2((dml_float_t) min_meta_chunk_bytes) - 6 + 1);
|
||||
|
||||
if (min_meta_chunk_bytes == 0)
|
||||
rq_regs->rq_regs_c.min_meta_chunk_size = 0;
|
||||
else
|
||||
rq_regs->rq_regs_c.min_meta_chunk_size = (dml_uint_t)(dml_log2((dml_float_t) p1_min_meta_chunk_bytes) - 6 + 1);
|
||||
|
||||
rq_regs->rq_regs_l.dpte_group_size = (dml_uint_t)(dml_log2((dml_float_t) dpte_group_bytes) - 6);
|
||||
rq_regs->rq_regs_l.mpte_group_size = (dml_uint_t)(dml_log2((dml_float_t) mpte_group_bytes) - 6);
|
||||
rq_regs->rq_regs_c.dpte_group_size = (dml_uint_t)(dml_log2((dml_float_t) p1_dpte_group_bytes) - 6);
|
||||
rq_regs->rq_regs_c.mpte_group_size = (dml_uint_t)(dml_log2((dml_float_t) p1_mpte_group_bytes) - 6);
|
||||
|
||||
detile_buf_size_in_bytes = (dml_uint_t)(dml_get_det_buffer_size_kbytes(mode_lib, pipe_idx) * 1024);
|
||||
|
||||
pte_row_height_linear = (dml_uint_t)(dml_get_dpte_row_height_linear_l(mode_lib, pipe_idx));
|
||||
|
||||
if (sw_mode == dml_sw_linear)
|
||||
ASSERT(pte_row_height_linear >= 8);
|
||||
|
||||
rq_regs->rq_regs_l.pte_row_height_linear = (dml_uint_t)(dml_floor(dml_log2((dml_float_t) pte_row_height_linear), 1) - 3);
|
||||
|
||||
if (dual_plane) {
|
||||
dml_uint_t p1_pte_row_height_linear = (dml_uint_t)(dml_get_dpte_row_height_linear_c(mode_lib, pipe_idx));
|
||||
if (sw_mode == dml_sw_linear)
|
||||
ASSERT(p1_pte_row_height_linear >= 8);
|
||||
|
||||
rq_regs->rq_regs_c.pte_row_height_linear = (dml_uint_t)(dml_floor(dml_log2((dml_float_t) p1_pte_row_height_linear), 1) - 3);
|
||||
}
|
||||
|
||||
rq_regs->rq_regs_l.swath_height = (dml_uint_t)(dml_log2((dml_float_t) dml_get_swath_height_l(mode_lib, pipe_idx)));
|
||||
rq_regs->rq_regs_c.swath_height = (dml_uint_t)(dml_log2((dml_float_t) dml_get_swath_height_c(mode_lib, pipe_idx)));
|
||||
|
||||
if (pixel_chunk_bytes >= 32 * 1024 || (dual_plane && p1_pixel_chunk_bytes >= 32 * 1024)) { //32kb
|
||||
rq_regs->drq_expansion_mode = 0;
|
||||
} else {
|
||||
rq_regs->drq_expansion_mode = 2;
|
||||
}
|
||||
rq_regs->prq_expansion_mode = 1;
|
||||
rq_regs->mrq_expansion_mode = 1;
|
||||
rq_regs->crq_expansion_mode = 1;
|
||||
|
||||
stored_swath_l_bytes = dml_get_det_stored_buffer_size_l_bytes(mode_lib, pipe_idx);
|
||||
stored_swath_c_bytes = dml_get_det_stored_buffer_size_c_bytes(mode_lib, pipe_idx);
|
||||
is_phantom_pipe = dml_get_is_phantom_pipe(mode_lib, pipe_idx);
|
||||
|
||||
// Note: detile_buf_plane1_addr is in unit of 1KB
|
||||
if (dual_plane) {
|
||||
if (is_phantom_pipe) {
|
||||
detile_buf_plane1_addr = (dml_uint_t)((1024.0*1024.0) / 2.0 / 1024.0); // half to chroma
|
||||
} else {
|
||||
if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) {
|
||||
detile_buf_plane1_addr = (dml_uint_t)(detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
|
||||
#ifdef __DML_VBA_DEBUG__
|
||||
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
|
||||
#endif
|
||||
} else {
|
||||
detile_buf_plane1_addr = (dml_uint_t)(dml_round_to_multiple((dml_uint_t)((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0); // 2/3 to luma
|
||||
#ifdef __DML_VBA_DEBUG__
|
||||
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
rq_regs->plane1_base_address = detile_buf_plane1_addr;
|
||||
|
||||
#ifdef __DML_VBA_DEBUG__
|
||||
dml_print("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
|
||||
dml_print("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
|
||||
dml_print("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
|
||||
dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
|
||||
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
|
||||
dml_print("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
|
||||
#endif
|
||||
dml_print_rq_regs_st(rq_regs);
|
||||
dml_print("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
|
||||
}
|
||||
|
||||
// Note: currently taken in as is.
|
||||
// Nice to decouple code from hw register implement and extract code that are repeated for luma and chroma.
|
||||
|
||||
|
||||
void dml_rq_dlg_get_dlg_reg(dml_display_dlg_regs_st *disp_dlg_regs,
|
||||
dml_display_ttu_regs_st *disp_ttu_regs,
|
||||
struct display_mode_lib_st *mode_lib,
|
||||
const dml_uint_t pipe_idx)
|
||||
{
|
||||
dml_uint_t plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
|
||||
enum dml_source_format_class source_format = mode_lib->ms.cache_display_cfg.surface.SourcePixelFormat[plane_idx];
|
||||
struct dml_timing_cfg_st *timing = &mode_lib->ms.cache_display_cfg.timing;
|
||||
struct dml_plane_cfg_st *plane = &mode_lib->ms.cache_display_cfg.plane;
|
||||
struct dml_hw_resource_st *hw = &mode_lib->ms.cache_display_cfg.hw;
|
||||
dml_bool_t dual_plane = is_dual_plane(source_format);
|
||||
dml_uint_t num_cursors = plane->NumberOfCursors[plane_idx];
|
||||
enum dml_odm_mode odm_mode = hw->ODMMode[plane_idx];
|
||||
|
||||
dml_uint_t htotal = timing->HTotal[plane_idx];
|
||||
dml_uint_t hactive = timing->HActive[plane_idx];
|
||||
dml_uint_t hblank_end = timing->HBlankEnd[plane_idx];
|
||||
dml_uint_t vblank_end = timing->VBlankEnd[plane_idx];
|
||||
dml_bool_t interlaced = timing->Interlace[plane_idx];
|
||||
dml_float_t pclk_freq_in_mhz = (dml_float_t) timing->PixelClock[plane_idx];
|
||||
dml_float_t refclk_freq_in_mhz = (hw->DLGRefClkFreqMHz > 0) ? (dml_float_t) hw->DLGRefClkFreqMHz : mode_lib->soc.refclk_mhz;
|
||||
dml_float_t ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
|
||||
|
||||
dml_uint_t vready_after_vcount0;
|
||||
|
||||
dml_uint_t dst_x_after_scaler;
|
||||
dml_uint_t dst_y_after_scaler;
|
||||
|
||||
dml_float_t dst_y_prefetch;
|
||||
dml_float_t dst_y_per_vm_vblank;
|
||||
dml_float_t dst_y_per_row_vblank;
|
||||
dml_float_t dst_y_per_vm_flip;
|
||||
dml_float_t dst_y_per_row_flip;
|
||||
|
||||
dml_float_t max_dst_y_per_vm_vblank = 32.0; //U5.2
|
||||
dml_float_t max_dst_y_per_row_vblank = 16.0; //U4.2
|
||||
|
||||
dml_float_t vratio_pre_l;
|
||||
dml_float_t vratio_pre_c;
|
||||
|
||||
dml_float_t refcyc_per_line_delivery_pre_l;
|
||||
dml_float_t refcyc_per_line_delivery_l;
|
||||
dml_float_t refcyc_per_line_delivery_pre_c = 0.;
|
||||
dml_float_t refcyc_per_line_delivery_c = 0.;
|
||||
dml_float_t refcyc_per_req_delivery_pre_l;
|
||||
dml_float_t refcyc_per_req_delivery_l;
|
||||
dml_float_t refcyc_per_req_delivery_pre_c = 0.;
|
||||
dml_float_t refcyc_per_req_delivery_c = 0.;
|
||||
dml_float_t refcyc_per_req_delivery_pre_cur0 = 0.;
|
||||
dml_float_t refcyc_per_req_delivery_cur0 = 0.;
|
||||
|
||||
dml_float_t dst_y_per_pte_row_nom_l;
|
||||
dml_float_t dst_y_per_pte_row_nom_c;
|
||||
dml_float_t dst_y_per_meta_row_nom_l;
|
||||
dml_float_t dst_y_per_meta_row_nom_c;
|
||||
dml_float_t refcyc_per_pte_group_nom_l;
|
||||
dml_float_t refcyc_per_pte_group_nom_c;
|
||||
dml_float_t refcyc_per_pte_group_vblank_l;
|
||||
dml_float_t refcyc_per_pte_group_vblank_c;
|
||||
dml_float_t refcyc_per_pte_group_flip_l;
|
||||
dml_float_t refcyc_per_pte_group_flip_c;
|
||||
dml_float_t refcyc_per_meta_chunk_nom_l;
|
||||
dml_float_t refcyc_per_meta_chunk_nom_c;
|
||||
dml_float_t refcyc_per_meta_chunk_vblank_l;
|
||||
dml_float_t refcyc_per_meta_chunk_vblank_c;
|
||||
dml_float_t refcyc_per_meta_chunk_flip_l;
|
||||
dml_float_t refcyc_per_meta_chunk_flip_c;
|
||||
|
||||
dml_float_t temp;
|
||||
dml_float_t min_ttu_vblank;
|
||||
dml_uint_t min_dst_y_next_start;
|
||||
|
||||
dml_print("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
|
||||
dml_print("DML_DLG::%s: plane_idx = %d\n", __func__, plane_idx);
|
||||
dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
|
||||
dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
|
||||
dml_print("DML_DLG: %s: hw->DLGRefClkFreqMHz = %3.2f\n", __func__, hw->DLGRefClkFreqMHz);
|
||||
dml_print("DML_DLG: %s: soc.refclk_mhz = %3.2f\n", __func__, mode_lib->soc.refclk_mhz);
|
||||
dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
|
||||
dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, ref_freq_to_pix_freq);
|
||||
dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
|
||||
|
||||
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
|
||||
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
|
||||
|
||||
ASSERT(refclk_freq_in_mhz != 0);
|
||||
ASSERT(pclk_freq_in_mhz != 0);
|
||||
ASSERT(ref_freq_to_pix_freq < 4.0);
|
||||
|
||||
// Need to figure out which side of odm combine we're in
|
||||
// Assume the pipe instance under the same plane is in order
|
||||
|
||||
if (odm_mode == dml_odm_mode_bypass) {
|
||||
disp_dlg_regs->refcyc_h_blank_end = (dml_uint_t)((dml_float_t) hblank_end * ref_freq_to_pix_freq);
|
||||
} else if (odm_mode == dml_odm_mode_combine_2to1 || odm_mode == dml_odm_mode_combine_4to1) {
|
||||
// find out how many pipe are in this plane
|
||||
dml_uint_t num_active_pipes = dml_get_num_active_pipes(&mode_lib->ms.cache_display_cfg);
|
||||
dml_uint_t first_pipe_idx_in_plane = __DML_NUM_PLANES__;
|
||||
dml_uint_t pipe_idx_in_combine = 0; // pipe index within the plane
|
||||
dml_uint_t odm_combine_factor = (odm_mode == dml_odm_mode_combine_2to1 ? 2 : 4);
|
||||
|
||||
for (dml_uint_t i = 0; i < num_active_pipes; i++) {
|
||||
if (dml_get_plane_idx(mode_lib, i) == plane_idx) {
|
||||
if (i < first_pipe_idx_in_plane) {
|
||||
first_pipe_idx_in_plane = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
pipe_idx_in_combine = pipe_idx - first_pipe_idx_in_plane; // DML assumes the pipes in the same plane will have continuous indexing (i.e. plane 0 use pipe 0, 1, and plane 1 uses pipe 2, 3, etc.)
|
||||
|
||||
disp_dlg_regs->refcyc_h_blank_end = (dml_uint_t)(((dml_float_t) hblank_end + (dml_float_t) pipe_idx_in_combine * (dml_float_t) hactive / (dml_float_t) odm_combine_factor) * ref_freq_to_pix_freq);
|
||||
dml_print("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
|
||||
dml_print("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, first_pipe_idx_in_plane);
|
||||
dml_print("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, pipe_idx_in_combine);
|
||||
dml_print("DML_DLG: %s: odm_combine_factor = %d\n", __func__, odm_combine_factor);
|
||||
}
|
||||
dml_print("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
|
||||
|
||||
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (dml_uint_t)dml_pow(2, 13));
|
||||
|
||||
disp_dlg_regs->ref_freq_to_pix_freq = (dml_uint_t)(ref_freq_to_pix_freq * dml_pow(2, 19));
|
||||
temp = dml_pow(2, 8);
|
||||
disp_dlg_regs->refcyc_per_htotal = (dml_uint_t)(ref_freq_to_pix_freq * (dml_float_t)htotal * temp);
|
||||
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
|
||||
|
||||
min_ttu_vblank = dml_get_min_ttu_vblank_in_us(mode_lib, pipe_idx);
|
||||
min_dst_y_next_start = (dml_uint_t)(dml_get_min_dst_y_next_start(mode_lib, pipe_idx));
|
||||
|
||||
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
|
||||
dml_print("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, min_dst_y_next_start);
|
||||
dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, ref_freq_to_pix_freq);
|
||||
|
||||
vready_after_vcount0 = (dml_uint_t)(dml_get_vready_at_or_after_vsync(mode_lib, pipe_idx));
|
||||
disp_dlg_regs->vready_after_vcount0 = vready_after_vcount0;
|
||||
|
||||
dml_print("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
|
||||
|
||||
dst_x_after_scaler = (dml_uint_t)(dml_get_dst_x_after_scaler(mode_lib, pipe_idx));
|
||||
dst_y_after_scaler = (dml_uint_t)(dml_get_dst_y_after_scaler(mode_lib, pipe_idx));
|
||||
|
||||
dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, dst_x_after_scaler);
|
||||
dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, dst_y_after_scaler);
|
||||
|
||||
dst_y_prefetch = dml_get_dst_y_prefetch(mode_lib, pipe_idx);
|
||||
dst_y_per_vm_vblank = dml_get_dst_y_per_vm_vblank(mode_lib, pipe_idx);
|
||||
dst_y_per_row_vblank = dml_get_dst_y_per_row_vblank(mode_lib, pipe_idx);
|
||||
dst_y_per_vm_flip = dml_get_dst_y_per_vm_flip(mode_lib, pipe_idx);
|
||||
dst_y_per_row_flip = dml_get_dst_y_per_row_flip(mode_lib, pipe_idx);
|
||||
|
||||
// magic!
|
||||
if (htotal <= 75) {
|
||||
max_dst_y_per_vm_vblank = 100.0;
|
||||
max_dst_y_per_row_vblank = 100.0;
|
||||
}
|
||||
|
||||
dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
|
||||
dml_print("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, dst_y_per_vm_flip);
|
||||
dml_print("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, dst_y_per_row_flip);
|
||||
dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
|
||||
dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
|
||||
|
||||
ASSERT(dst_y_per_vm_vblank < max_dst_y_per_vm_vblank);
|
||||
ASSERT(dst_y_per_row_vblank < max_dst_y_per_row_vblank);
|
||||
ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
|
||||
|
||||
vratio_pre_l = dml_get_vratio_prefetch_l(mode_lib, pipe_idx);
|
||||
vratio_pre_c = dml_get_vratio_prefetch_c(mode_lib, pipe_idx);
|
||||
|
||||
dml_print("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, vratio_pre_l);
|
||||
dml_print("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, vratio_pre_c);
|
||||
|
||||
// Active
|
||||
refcyc_per_line_delivery_pre_l = dml_get_refcyc_per_line_delivery_pre_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_line_delivery_l = dml_get_refcyc_per_line_delivery_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
|
||||
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, refcyc_per_line_delivery_pre_l);
|
||||
dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, refcyc_per_line_delivery_l);
|
||||
|
||||
if (dual_plane) {
|
||||
refcyc_per_line_delivery_pre_c = dml_get_refcyc_per_line_delivery_pre_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_line_delivery_c = dml_get_refcyc_per_line_delivery_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
|
||||
dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, refcyc_per_line_delivery_pre_c);
|
||||
dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, refcyc_per_line_delivery_c);
|
||||
}
|
||||
|
||||
disp_dlg_regs->refcyc_per_vm_dmdata = (dml_uint_t)(dml_get_refcyc_per_vm_dmdata_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
|
||||
disp_dlg_regs->dmdata_dl_delta = (dml_uint_t)(dml_get_dmdata_dl_delta_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
|
||||
|
||||
refcyc_per_req_delivery_pre_l = dml_get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_req_delivery_l = dml_get_refcyc_per_req_delivery_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
|
||||
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, refcyc_per_req_delivery_pre_l);
|
||||
dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, refcyc_per_req_delivery_l);
|
||||
|
||||
if (dual_plane) {
|
||||
refcyc_per_req_delivery_pre_c = dml_get_refcyc_per_req_delivery_pre_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_req_delivery_c = dml_get_refcyc_per_req_delivery_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
|
||||
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, refcyc_per_req_delivery_pre_c);
|
||||
dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, refcyc_per_req_delivery_c);
|
||||
}
|
||||
|
||||
// TTU - Cursor
|
||||
ASSERT(num_cursors <= 1);
|
||||
if (num_cursors > 0) {
|
||||
refcyc_per_req_delivery_pre_cur0 = dml_get_refcyc_per_cursor_req_delivery_pre_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_req_delivery_cur0 = dml_get_refcyc_per_cursor_req_delivery_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
|
||||
dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur0 = %3.2f\n", __func__, refcyc_per_req_delivery_pre_cur0);
|
||||
dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur0 = %3.2f\n", __func__, refcyc_per_req_delivery_cur0);
|
||||
}
|
||||
|
||||
// Assign to register structures
|
||||
disp_dlg_regs->min_dst_y_next_start = (dml_uint_t)((dml_float_t) min_dst_y_next_start * dml_pow(2, 2));
|
||||
ASSERT(disp_dlg_regs->min_dst_y_next_start < (dml_uint_t)dml_pow(2, 18));
|
||||
|
||||
disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
|
||||
disp_dlg_regs->refcyc_x_after_scaler = (dml_uint_t)((dml_float_t) dst_x_after_scaler * ref_freq_to_pix_freq); // in terms of refclk
|
||||
disp_dlg_regs->dst_y_prefetch = (dml_uint_t)(dst_y_prefetch * dml_pow(2, 2));
|
||||
disp_dlg_regs->dst_y_per_vm_vblank = (dml_uint_t)(dst_y_per_vm_vblank * dml_pow(2, 2));
|
||||
disp_dlg_regs->dst_y_per_row_vblank = (dml_uint_t)(dst_y_per_row_vblank * dml_pow(2, 2));
|
||||
disp_dlg_regs->dst_y_per_vm_flip = (dml_uint_t)(dst_y_per_vm_flip * dml_pow(2, 2));
|
||||
disp_dlg_regs->dst_y_per_row_flip = (dml_uint_t)(dst_y_per_row_flip * dml_pow(2, 2));
|
||||
|
||||
disp_dlg_regs->vratio_prefetch = (dml_uint_t)(vratio_pre_l * dml_pow(2, 19));
|
||||
disp_dlg_regs->vratio_prefetch_c = (dml_uint_t)(vratio_pre_c * dml_pow(2, 19));
|
||||
|
||||
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
|
||||
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
|
||||
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
|
||||
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
|
||||
|
||||
// hack for FPGA
|
||||
/* NOTE: We dont have getenv defined in driver and it does not make any sense in the driver */
|
||||
/*char* fpga_env = getenv("FPGA_FPDIV");
|
||||
if(fpga_env !=NULL)
|
||||
{
|
||||
if(disp_dlg_regs->vratio_prefetch >= (dml_uint_t)dml_pow(2, 22))
|
||||
{
|
||||
disp_dlg_regs->vratio_prefetch = (dml_uint_t)dml_pow(2, 22)-1;
|
||||
dml_print("FPGA msg: vratio_prefetch exceed the max value, the register field is [21:0]\n");
|
||||
}
|
||||
}*/
|
||||
|
||||
disp_dlg_regs->refcyc_per_vm_group_vblank = (dml_uint_t)(dml_get_refcyc_per_vm_group_vblank_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
|
||||
disp_dlg_regs->refcyc_per_vm_group_flip = (dml_uint_t)(dml_get_refcyc_per_vm_group_flip_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
|
||||
disp_dlg_regs->refcyc_per_vm_req_vblank = (dml_uint_t)(dml_get_refcyc_per_vm_req_vblank_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10));
|
||||
disp_dlg_regs->refcyc_per_vm_req_flip = (dml_uint_t)(dml_get_refcyc_per_vm_req_flip_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10));
|
||||
|
||||
dst_y_per_pte_row_nom_l = dml_get_dst_y_per_pte_row_nom_l(mode_lib, pipe_idx);
|
||||
dst_y_per_pte_row_nom_c = dml_get_dst_y_per_pte_row_nom_c(mode_lib, pipe_idx);
|
||||
dst_y_per_meta_row_nom_l = dml_get_dst_y_per_meta_row_nom_l(mode_lib, pipe_idx);
|
||||
dst_y_per_meta_row_nom_c = dml_get_dst_y_per_meta_row_nom_c(mode_lib, pipe_idx);
|
||||
|
||||
refcyc_per_pte_group_nom_l = dml_get_refcyc_per_pte_group_nom_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_pte_group_nom_c = dml_get_refcyc_per_pte_group_nom_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_pte_group_vblank_l = dml_get_refcyc_per_pte_group_vblank_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_pte_group_vblank_c = dml_get_refcyc_per_pte_group_vblank_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_pte_group_flip_l = dml_get_refcyc_per_pte_group_flip_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_pte_group_flip_c = dml_get_refcyc_per_pte_group_flip_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
|
||||
refcyc_per_meta_chunk_nom_l = dml_get_refcyc_per_meta_chunk_nom_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_meta_chunk_nom_c = dml_get_refcyc_per_meta_chunk_nom_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_meta_chunk_vblank_l = dml_get_refcyc_per_meta_chunk_vblank_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_meta_chunk_vblank_c = dml_get_refcyc_per_meta_chunk_vblank_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_meta_chunk_flip_l = dml_get_refcyc_per_meta_chunk_flip_l_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
refcyc_per_meta_chunk_flip_c = dml_get_refcyc_per_meta_chunk_flip_c_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz;
|
||||
|
||||
disp_dlg_regs->dst_y_per_pte_row_nom_l = (dml_uint_t)(dst_y_per_pte_row_nom_l * dml_pow(2, 2));
|
||||
disp_dlg_regs->dst_y_per_pte_row_nom_c = (dml_uint_t)(dst_y_per_pte_row_nom_c * dml_pow(2, 2));
|
||||
disp_dlg_regs->dst_y_per_meta_row_nom_l = (dml_uint_t)(dst_y_per_meta_row_nom_l * dml_pow(2, 2));
|
||||
disp_dlg_regs->dst_y_per_meta_row_nom_c = (dml_uint_t)(dst_y_per_meta_row_nom_c * dml_pow(2, 2));
|
||||
disp_dlg_regs->refcyc_per_pte_group_nom_l = (dml_uint_t)(refcyc_per_pte_group_nom_l);
|
||||
disp_dlg_regs->refcyc_per_pte_group_nom_c = (dml_uint_t)(refcyc_per_pte_group_nom_c);
|
||||
disp_dlg_regs->refcyc_per_pte_group_vblank_l = (dml_uint_t)(refcyc_per_pte_group_vblank_l);
|
||||
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (dml_uint_t)(refcyc_per_pte_group_vblank_c);
|
||||
disp_dlg_regs->refcyc_per_pte_group_flip_l = (dml_uint_t)(refcyc_per_pte_group_flip_l);
|
||||
disp_dlg_regs->refcyc_per_pte_group_flip_c = (dml_uint_t)(refcyc_per_pte_group_flip_c);
|
||||
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (dml_uint_t)(refcyc_per_meta_chunk_nom_l);
|
||||
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = (dml_uint_t)(refcyc_per_meta_chunk_nom_c);
|
||||
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = (dml_uint_t)(refcyc_per_meta_chunk_vblank_l);
|
||||
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c = (dml_uint_t)(refcyc_per_meta_chunk_vblank_c);
|
||||
disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (dml_uint_t)(refcyc_per_meta_chunk_flip_l);
|
||||
disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (dml_uint_t)(refcyc_per_meta_chunk_flip_c);
|
||||
disp_dlg_regs->refcyc_per_line_delivery_pre_l = (dml_uint_t)dml_floor(refcyc_per_line_delivery_pre_l, 1);
|
||||
disp_dlg_regs->refcyc_per_line_delivery_l = (dml_uint_t)dml_floor(refcyc_per_line_delivery_l, 1);
|
||||
disp_dlg_regs->refcyc_per_line_delivery_pre_c = (dml_uint_t)dml_floor(refcyc_per_line_delivery_pre_c, 1);
|
||||
disp_dlg_regs->refcyc_per_line_delivery_c = (dml_uint_t)dml_floor(refcyc_per_line_delivery_c, 1);
|
||||
|
||||
disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
|
||||
disp_dlg_regs->dst_y_offset_cur0 = 0;
|
||||
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
|
||||
disp_dlg_regs->dst_y_offset_cur1 = 0;
|
||||
|
||||
disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
|
||||
|
||||
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (dml_uint_t)(refcyc_per_req_delivery_pre_l * dml_pow(2, 10));
|
||||
disp_ttu_regs->refcyc_per_req_delivery_l = (dml_uint_t)(refcyc_per_req_delivery_l * dml_pow(2, 10));
|
||||
disp_ttu_regs->refcyc_per_req_delivery_pre_c = (dml_uint_t)(refcyc_per_req_delivery_pre_c * dml_pow(2, 10));
|
||||
disp_ttu_regs->refcyc_per_req_delivery_c = (dml_uint_t)(refcyc_per_req_delivery_c * dml_pow(2, 10));
|
||||
disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 = (dml_uint_t)(refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
|
||||
disp_ttu_regs->refcyc_per_req_delivery_cur0 = (dml_uint_t)(refcyc_per_req_delivery_cur0 * dml_pow(2, 10));
|
||||
disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 = 0;
|
||||
disp_ttu_regs->refcyc_per_req_delivery_cur1 = 0;
|
||||
disp_ttu_regs->qos_level_low_wm = 0;
|
||||
|
||||
disp_ttu_regs->qos_level_high_wm = (dml_uint_t)(4.0 * (dml_float_t)htotal * ref_freq_to_pix_freq);
|
||||
|
||||
disp_ttu_regs->qos_level_flip = 14;
|
||||
disp_ttu_regs->qos_level_fixed_l = 8;
|
||||
disp_ttu_regs->qos_level_fixed_c = 8;
|
||||
disp_ttu_regs->qos_level_fixed_cur0 = 8;
|
||||
disp_ttu_regs->qos_ramp_disable_l = 0;
|
||||
disp_ttu_regs->qos_ramp_disable_c = 0;
|
||||
disp_ttu_regs->qos_ramp_disable_cur0 = 0;
|
||||
disp_ttu_regs->min_ttu_vblank = (dml_uint_t)(min_ttu_vblank * refclk_freq_in_mhz);
|
||||
|
||||
// CHECK for HW registers' range, assert or clamp
|
||||
ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
|
||||
ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
|
||||
ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
|
||||
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
|
||||
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (dml_uint_t)dml_pow(2, 23))
|
||||
disp_dlg_regs->refcyc_per_vm_group_vblank = (dml_uint_t)(dml_pow(2, 23) - 1);
|
||||
|
||||
if (disp_dlg_regs->refcyc_per_vm_group_flip >= (dml_uint_t)dml_pow(2, 23))
|
||||
disp_dlg_regs->refcyc_per_vm_group_flip = (dml_uint_t)(dml_pow(2, 23) - 1);
|
||||
|
||||
if (disp_dlg_regs->refcyc_per_vm_req_vblank >= (dml_uint_t)dml_pow(2, 23))
|
||||
disp_dlg_regs->refcyc_per_vm_req_vblank = (dml_uint_t)(dml_pow(2, 23) - 1);
|
||||
|
||||
if (disp_dlg_regs->refcyc_per_vm_req_flip >= (dml_uint_t)dml_pow(2, 23))
|
||||
disp_dlg_regs->refcyc_per_vm_req_flip = (dml_uint_t)(dml_pow(2, 23) - 1);
|
||||
|
||||
|
||||
ASSERT(disp_dlg_regs->dst_y_after_scaler < (dml_uint_t)8);
|
||||
ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (dml_uint_t)dml_pow(2, 13));
|
||||
ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (dml_uint_t)dml_pow(2, 17));
|
||||
if (dual_plane) {
|
||||
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (dml_uint_t)dml_pow(2, 17)) { // FIXME what so special about chroma, can we just assert?
|
||||
dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u > register max U15.2 %u\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (dml_uint_t)dml_pow(2, 17) - 1);
|
||||
}
|
||||
}
|
||||
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (dml_uint_t)dml_pow(2, 17));
|
||||
ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_c < (dml_uint_t)dml_pow(2, 17));
|
||||
|
||||
if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (dml_uint_t)dml_pow(2, 23))
|
||||
disp_dlg_regs->refcyc_per_pte_group_nom_l = (dml_uint_t)(dml_pow(2, 23) - 1);
|
||||
if (dual_plane) {
|
||||
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (dml_uint_t)dml_pow(2, 23))
|
||||
disp_dlg_regs->refcyc_per_pte_group_nom_c = (dml_uint_t)(dml_pow(2, 23) - 1);
|
||||
}
|
||||
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (dml_uint_t)dml_pow(2, 13));
|
||||
if (dual_plane) {
|
||||
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (dml_uint_t)dml_pow(2, 13));
|
||||
}
|
||||
|
||||
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (dml_uint_t)dml_pow(2, 23))
|
||||
disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (dml_uint_t)(dml_pow(2, 23) - 1);
|
||||
if (dual_plane) {
|
||||
if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (dml_uint_t)dml_pow(2, 23))
|
||||
disp_dlg_regs->refcyc_per_meta_chunk_nom_c = (dml_uint_t)(dml_pow(2, 23) - 1);
|
||||
}
|
||||
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (dml_uint_t)dml_pow(2, 13));
|
||||
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_c < (dml_uint_t)dml_pow(2, 13));
|
||||
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (dml_uint_t)dml_pow(2, 13));
|
||||
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (dml_uint_t)dml_pow(2, 13));
|
||||
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (dml_uint_t)dml_pow(2, 13));
|
||||
ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (dml_uint_t)dml_pow(2, 13));
|
||||
ASSERT(disp_ttu_regs->qos_level_low_wm < (dml_uint_t) dml_pow(2, 14));
|
||||
ASSERT(disp_ttu_regs->qos_level_high_wm < (dml_uint_t) dml_pow(2, 14));
|
||||
ASSERT(disp_ttu_regs->min_ttu_vblank < (dml_uint_t) dml_pow(2, 24));
|
||||
|
||||
dml_print_ttu_regs_st(disp_ttu_regs);
|
||||
dml_print_dlg_regs_st(disp_dlg_regs);
|
||||
dml_print("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
|
||||
}
|
||||
|
||||
void dml_rq_dlg_get_arb_params(struct display_mode_lib_st *mode_lib, dml_display_arb_params_st *arb_param)
|
||||
{
|
||||
memset(arb_param, 0, sizeof(*arb_param));
|
||||
arb_param->max_req_outstanding = 256;
|
||||
arb_param->min_req_outstanding = 256; // turn off the sat level feature if this set to max
|
||||
arb_param->sat_level_us = 60;
|
||||
arb_param->hvm_max_qos_commit_threshold = 0xf;
|
||||
arb_param->hvm_min_req_outstand_commit_threshold = 0xa;
|
||||
arb_param->compbuf_reserved_space_kbytes = 2 * 8; // assume max data chunk size of 8K
|
||||
}
|
|
@ -0,0 +1,63 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __DML_DISPLAY_RQ_DLG_CALC_H__
|
||||
#define __DML_DISPLAY_RQ_DLG_CALC_H__
|
||||
|
||||
#include "display_mode_core_structs.h"
|
||||
#include "display_mode_lib_defines.h"
|
||||
|
||||
struct display_mode_lib_st;
|
||||
|
||||
// Function: dml_rq_dlg_get_rq_reg
|
||||
// Main entry point for test to get the register values out of this DML class.
|
||||
// This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
|
||||
// and then populate the rq_regs struct
|
||||
// Input:
|
||||
// Assume mode_program is already called
|
||||
// Output:
|
||||
// rq_regs - struct that holds all the RQ registers field value.
|
||||
// See also: <display_rq_regs_st>
|
||||
|
||||
void dml_rq_dlg_get_rq_reg(dml_display_rq_regs_st *rq_regs,
|
||||
struct display_mode_lib_st *mode_lib,
|
||||
const dml_uint_t pipe_idx);
|
||||
|
||||
// Function: dml_rq_dlg_get_dlg_reg
|
||||
// Calculate and return DLG and TTU register struct given the system setting
|
||||
// Output:
|
||||
// dlg_regs - output DLG register struct
|
||||
// ttu_regs - output DLG TTU register struct
|
||||
// Input:
|
||||
// Assume mode_program is already called
|
||||
// pipe_idx - index that identifies the e2e_pipe_param that corresponding to this dlg
|
||||
void dml_rq_dlg_get_dlg_reg(dml_display_dlg_regs_st *dlg_regs,
|
||||
dml_display_ttu_regs_st *ttu_regs,
|
||||
struct display_mode_lib_st *mode_lib,
|
||||
const dml_uint_t pipe_idx);
|
||||
|
||||
// Function: dml_rq_dlg_get_arb_params
|
||||
void dml_rq_dlg_get_arb_params(struct display_mode_lib_st *mode_lib, dml_display_arb_params_st *arb_param);
|
||||
|
||||
#endif
|
29
drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h
Normal file
29
drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h
Normal file
|
@ -0,0 +1,29 @@
|
|||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2023 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#ifndef __DML_LOGGING_H__
|
||||
#define __DML_LOGGING_H__
|
||||
|
||||
#define dml_print(...) ((void)0)
|
||||
|
||||
#endif //__DML_LOGGING_H__
|
|
@ -491,6 +491,7 @@ union bw_output {
|
|||
struct bw_context {
|
||||
union bw_output bw;
|
||||
struct display_mode_lib dml;
|
||||
struct dml2_context *dml2;
|
||||
};
|
||||
|
||||
struct dc_dmub_cmd {
|
||||
|
|
Loading…
Add table
Reference in a new issue