Merge tag 'drm-msm-fixes-2025-08-26' of https://gitlab.freedesktop.org/drm/msm into drm-fixes
Fixes for v6.17-rc4

Core/GPU:
- fix comment doc warning in gpuvm
- fix build with KMS disabled
- fix pgtable setup/teardown race
- global fault counter fix
- various error path fixes
- GPU devcoredump snapshot fixes
- handle in-place VM_BIND remaps to solve turnip vm update race
- skip re-emitting IBs for unusable VMs
- Don't use %pK through printk
- moved display snapshot init earlier, fixing a crash

DPU:
- Fixed crash in virtual plane checking code
- Fixed mode comparison in virtual plane checking code

DSI:
- Adjusted width of resolution-related registers
- Fixed locking issue on 14nm PLLs

UBWC (per Bjorn's ack):
- Added UBWC configuration for several missing platforms (fixing regression)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <rob.clark@oss.qualcomm.com>
Link: https://lore.kernel.org/r/CACSVV02+u1VW1dzuz6JWwVEfpgTj6Y-JXMH+vX43KsKTVsW+Yg@mail.gmail.com
commit 49862587fa
26 changed files with 303 additions and 199 deletions
@@ -60,7 +60,6 @@ properties:
       - const: bus
       - const: core
       - const: vsync
-      - const: lut
       - const: tbu
       - const: tbu_rt
     # MSM8996 has additional iommu clock

@@ -2430,7 +2430,7 @@ static const struct drm_gpuvm_ops lock_ops = {
  * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
  * will be newly mapped.
  *
- * The expected usage is:
+ * The expected usage is::
  *
  * .. code-block:: c
  *

@@ -11,7 +11,7 @@
 static const unsigned int *gen7_0_0_external_core_regs[] __always_unused;
 static const unsigned int *gen7_2_0_external_core_regs[] __always_unused;
 static const unsigned int *gen7_9_0_external_core_regs[] __always_unused;
-static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
+static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
 static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused;
 
 #include "adreno_gen7_0_0_snapshot.h"

@@ -174,8 +174,15 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
 static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
         u32 *data)
 {
-    u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
-        A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+    u32 reg;
+
+    if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+        reg = A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+              A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+    } else {
+        reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+              A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+    }
 
     gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
     gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);

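Editorial note: the A6XX/A7XX split added above exists because the block-select
bitfield moved between GPU generations; per the register XML hunks near the end
of this page, PING_BLK_SEL sits at bits 15:8 on A6XX but bits 24:16 on A7XX.
A standalone C sketch of the two encodings (the helper names are hypothetical,
only the field positions come from the XML):

#include <stdint.h>
#include <stdio.h>

/* Packs (block, offset) the way the A6XX_DBGC_CFG_DBGBUS_SEL_D macros do. */
static uint32_t a6xx_dbgbus_sel(uint32_t block, uint32_t offset)
{
    return ((offset & 0xff) << 0) |   /* PING_INDEX:   bits 7:0  */
           ((block & 0xff) << 8);     /* PING_BLK_SEL: bits 15:8 */
}

/* Same selector on A7XX: the block-select field moved up to bits 24:16. */
static uint32_t a7xx_dbgbus_sel(uint32_t block, uint32_t offset)
{
    return ((offset & 0xff) << 0) |   /* PING_INDEX:   bits 7:0   */
           ((block & 0x1ff) << 16);   /* PING_BLK_SEL: bits 24:16 */
}

int main(void)
{
    /* The same (block, offset) pair encodes differently per family. */
    printf("a6xx: 0x%08x\n", a6xx_dbgbus_sel(0x2a, 3)); /* 0x00002a03 */
    printf("a7xx: 0x%08x\n", a7xx_dbgbus_sel(0x2a, 3)); /* 0x002a0003 */
    return 0;
}
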
@@ -198,11 +205,18 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
     readl((ptr) + ((offset) << 2))
 
 /* read a value from the CX debug bus */
-static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+static int cx_debugbus_read(struct msm_gpu *gpu, void __iomem *cxdbg, u32 block, u32 offset,
         u32 *data)
 {
-    u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
-        A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+    u32 reg;
+
+    if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+        reg = A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+              A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+    } else {
+        reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+              A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+    }
 
     cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
     cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);

@@ -315,7 +329,8 @@ static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
         ptr += debugbus_read(gpu, block->id, i, ptr);
 }
 
-static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+static void a6xx_get_cx_debugbus_block(struct msm_gpu *gpu,
+        void __iomem *cxdbg,
         struct a6xx_gpu_state *a6xx_state,
         const struct a6xx_debugbus_block *block,
         struct a6xx_gpu_state_obj *obj)

@@ -330,7 +345,7 @@ static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
     obj->handle = block;
 
     for (ptr = obj->data, i = 0; i < block->count; i++)
-        ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+        ptr += cx_debugbus_read(gpu, cxdbg, block->id, i, ptr);
 }
 
 static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu,

@@ -423,8 +438,9 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
                 a6xx_state, &a7xx_debugbus_blocks[gbif_debugbus_blocks[i]],
                 &a6xx_state->debugbus[i + debugbus_blocks_count]);
         }
-    }
 
-    a6xx_state->nr_debugbus = total_debugbus_blocks;
+        a6xx_state->nr_debugbus = total_debugbus_blocks;
+    }
 }
 
 static void a6xx_get_debugbus(struct msm_gpu *gpu,

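Editorial note: the hunk above moves the nr_debugbus update inside the block
that actually populated the array, so the recorded count can no longer claim
entries that were never snapshotted. The general shape of the fix, as a hedged
sketch (the names here are hypothetical, not the driver's):

/* Keep a count and the buffer it describes in sync. */
struct snapshot {
    unsigned nr_blocks;
    void *blocks;
};

static void fill_snapshot(struct snapshot *s, void *buf, unsigned total)
{
    s->blocks = buf;
    if (s->blocks) {
        /* ... populate s->blocks ... */
        s->nr_blocks = total;   /* only valid once blocks exist */
    }
    /* if buf was NULL, nr_blocks stays 0 and readers skip the array */
}
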
@@ -526,7 +542,8 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
     int i;
 
     for (i = 0; i < nr_cx_debugbus_blocks; i++)
-        a6xx_get_cx_debugbus_block(cxdbg,
+        a6xx_get_cx_debugbus_block(gpu,
+                       cxdbg,
                        a6xx_state,
                        &cx_debugbus_blocks[i],
                        &a6xx_state->cx_debugbus[i]);

@@ -759,15 +776,15 @@ static void a7xx_get_cluster(struct msm_gpu *gpu,
     size_t datasize;
     int i, regcount = 0;
 
+    /* Some clusters need a selector register to be programmed too */
+    if (cluster->sel)
+        in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
+
     in += CRASHDUMP_WRITE(in, REG_A7XX_CP_APERTURE_CNTL_CD,
         A7XX_CP_APERTURE_CNTL_CD_PIPE(cluster->pipe_id) |
         A7XX_CP_APERTURE_CNTL_CD_CLUSTER(cluster->cluster_id) |
         A7XX_CP_APERTURE_CNTL_CD_CONTEXT(cluster->context_id));
 
-    /* Some clusters need a selector register to be programmed too */
-    if (cluster->sel)
-        in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
-
     for (i = 0; cluster->regs[i] != UINT_MAX; i += 2) {
         int count = RANGE(cluster->regs, i);

@@ -1796,6 +1813,7 @@ static void a7xx_show_shader(struct a6xx_gpu_state_obj *obj,
 
     print_name(p, " - type: ", a7xx_statetype_names[block->statetype]);
     print_name(p, " - pipe: ", a7xx_pipe_names[block->pipeid]);
+    drm_printf(p, " - location: %d\n", block->location);
 
     for (i = 0; i < block->num_sps; i++) {
         drm_printf(p, " - sp: %d\n", i);

@@ -1873,6 +1891,7 @@ static void a7xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
     print_name(p, " - pipe: ", a7xx_pipe_names[dbgahb->pipe_id]);
     print_name(p, " - cluster-name: ", a7xx_cluster_names[dbgahb->cluster_id]);
     drm_printf(p, " - context: %d\n", dbgahb->context_id);
+    drm_printf(p, " - location: %d\n", dbgahb->location_id);
     a7xx_show_registers_indented(dbgahb->regs, obj->data, p, 4);
 }
 }

@@ -419,47 +419,47 @@ static const struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
         REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
     { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
         REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
-    { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+    { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
         REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
-    { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+    { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
         REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
 };
 
 static const struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
     { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
-        REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
+        REG_A6XX_CP_SQE_STAT_DATA, 0x40, NULL },
     { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
         REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
-    { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+    { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
         REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
-    { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
-        REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL },
-    { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+    { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+        REG_A7XX_CP_BV_SQE_STAT_DATA, 0x40, NULL },
+    { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
         REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL },
-    { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+    { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
         REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL },
-    { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
-        REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL },
-    { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+    { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+        REG_A7XX_CP_SQE_AC_STAT_DATA, 0x40, NULL },
+    { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
         REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL },
-    { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+    { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
        REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL },
-    { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+    { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
         REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL },
-    { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+    { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
         REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
 };
 
 static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
-    "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+    "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
     REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
 };
 
 static const struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
-    { "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
-        REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
-    { "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
-        REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL },
+    { "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+        REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2200, NULL },
+    { "CP_BV_MEM_POOL_DBG", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
+        REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2200, NULL },
 };
 
 #define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }

@@ -81,7 +81,7 @@ static const u32 gen7_0_0_debugbus_blocks[] = {
     A7XX_DBGBUS_USPTP_7,
 };
 
-static struct gen7_shader_block gen7_0_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_0_0_shader_blocks[] = {
     {A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
     {A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
     {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},

@@ -668,12 +668,19 @@ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = {
 };
 static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8));
 
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
-static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
     0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c,
     0x0b60f, 0x0b621, 0x0b630, 0x0b633,
     UINT_MAX, UINT_MAX,
 };
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8));
+
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+    0x0b600, 0x0b600,
+    UINT_MAX, UINT_MAX,
+};
 static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8));
 
 /* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */

@@ -695,7 +702,7 @@ static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = {
     .val = 0x9,
 };
 
-static struct gen7_cluster_registers gen7_0_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_0_0_clusters[] = {
     { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
         gen7_0_0_noncontext_pipe_br_registers, },
     { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,

@@ -764,7 +771,7 @@ static struct gen7_cluster_registers gen7_0_0_clusters[] = {
     gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
 };
 
-static struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
     { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
         gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
     { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,

@@ -914,7 +921,7 @@ static const u32 gen7_0_0_dpm_registers[] = {
 };
 static_assert(IS_ALIGNED(sizeof(gen7_0_0_dpm_registers), 8));
 
-static struct gen7_reg_list gen7_0_0_reg_list[] = {
+static const struct gen7_reg_list gen7_0_0_reg_list[] = {
     { gen7_0_0_gpu_registers, NULL },
     { gen7_0_0_cx_misc_registers, NULL },
     { gen7_0_0_dpm_registers, NULL },

@@ -95,7 +95,7 @@ static const u32 gen7_2_0_debugbus_blocks[] = {
     A7XX_DBGBUS_CCHE_2,
 };
 
-static struct gen7_shader_block gen7_2_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_2_0_shader_blocks[] = {
     {A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
     {A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
     {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},

@@ -489,7 +489,7 @@ static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = {
     .val = 0x9,
 };
 
-static struct gen7_cluster_registers gen7_2_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_2_0_clusters[] = {
     { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
         gen7_2_0_noncontext_pipe_br_registers, },
     { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,

@@ -558,7 +558,7 @@ static struct gen7_cluster_registers gen7_2_0_clusters[] = {
     gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
 };
 
-static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
     { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
         gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
     { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,

@@ -573,6 +573,8 @@ static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
     gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
     { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
         gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
+    { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+        gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 },
     { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
         gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
     { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,

@@ -737,7 +739,7 @@ static const u32 gen7_2_0_dpm_registers[] = {
 };
 static_assert(IS_ALIGNED(sizeof(gen7_2_0_dpm_registers), 8));
 
-static struct gen7_reg_list gen7_2_0_reg_list[] = {
+static const struct gen7_reg_list gen7_2_0_reg_list[] = {
     { gen7_2_0_gpu_registers, NULL },
     { gen7_2_0_cx_misc_registers, NULL },
     { gen7_2_0_dpm_registers, NULL },

@@ -117,7 +117,7 @@ static const u32 gen7_9_0_cx_debugbus_blocks[] = {
     A7XX_DBGBUS_GBIF_CX,
 };
 
-static struct gen7_shader_block gen7_9_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_9_0_shader_blocks[] = {
     { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
     { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
     { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },

@@ -1116,7 +1116,7 @@ static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = {
     .val = 0x9,
 };
 
-static struct gen7_cluster_registers gen7_9_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_9_0_clusters[] = {
     { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
         gen7_9_0_non_context_pipe_br_registers, },
     { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,

@@ -1185,7 +1185,7 @@ static struct gen7_cluster_registers gen7_9_0_clusters[] = {
     gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
 };
 
-static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
     { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
         gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00},
     { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,

@@ -1294,34 +1294,34 @@ static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
     gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
 };
 
-static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
+static const struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
     { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
         REG_A6XX_CP_SQE_STAT_DATA, 0x00040},
     { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
         REG_A6XX_CP_DRAW_STATE_DATA, 0x00200},
-    { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+    { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
        REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
-    { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+    { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
        REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
-    { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+    { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
        REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
-    { "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
+    { "CP_BV_ROQ_DBG", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
        REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},
-    { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+    { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
        REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
-    { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+    { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
        REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
-    { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
+    { "CP_RESOURCE_TABLE_DBG", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
        REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100},
-    { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+    { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
        REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
-    { "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
+    { "CP_LPAC_ROQ_DBG", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
        REG_A7XX_CP_LPAC_ROQ_DBG_DATA, 0x00200},
-    { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+    { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
       REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x08000},
-    { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+    { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
       REG_A7XX_CP_SQE_AC_STAT_DATA, 0x00040},
-    { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+    { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
       REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x00040},
     { "CP_AQE_ROQ_0", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_0,
       REG_A7XX_CP_AQE_ROQ_DBG_DATA_0, 0x00100},

@@ -1337,7 +1337,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
     REG_A7XX_CP_AQE_STAT_DATA_1, 0x00040},
 };
 
-static struct gen7_reg_list gen7_9_0_reg_list[] = {
+static const struct gen7_reg_list gen7_9_0_reg_list[] = {
     { gen7_9_0_gpu_registers, NULL},
     { gen7_9_0_cx_misc_registers, NULL},
     { gen7_9_0_cx_dbgc_registers, NULL},

@@ -596,7 +596,7 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
 
     spin_lock_irqsave(&dev->event_lock, flags);
     if (dpu_crtc->event) {
-        DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+        DRM_DEBUG_VBL("%s: send event: %p\n", dpu_crtc->name,
                   dpu_crtc->event);
         trace_dpu_crtc_complete_flip(DRMID(crtc));
         drm_crtc_send_vblank_event(crtc, dpu_crtc->event);

@@ -730,6 +730,8 @@ bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_st
         return false;
 
     conn_state = drm_atomic_get_new_connector_state(state, connector);
+    if (!conn_state)
+        return false;
 
     /**
      * These checks are duplicated from dpu_encoder_update_topology() since

@@ -31,14 +31,14 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
     u32 base;
 
     if (!ctx) {
-        DRM_ERROR("invalid ctx %pK\n", ctx);
+        DRM_ERROR("invalid ctx %p\n", ctx);
         return;
     }
 
     base = ctx->cap->sblk->pcc.base;
 
     if (!base) {
-        DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
+        DRM_ERROR("invalid ctx %p pcc base 0x%x\n", ctx, base);
         return;
     }
 

@@ -1345,7 +1345,7 @@ static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms)
         dpu_kms->mmio = NULL;
         return ret;
     }
-    DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+    DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
 
     dpu_kms->vbif[VBIF_RT] = msm_ioremap_mdss(mdss_dev,
                           dpu_kms->pdev,

@@ -1380,7 +1380,7 @@ static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms)
         dpu_kms->mmio = NULL;
         return ret;
     }
-    DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+    DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
 
     dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
     if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {

@@ -1129,7 +1129,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
     struct drm_plane_state *old_plane_state =
         drm_atomic_get_old_plane_state(state, plane);
     struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
-    struct drm_crtc_state *crtc_state;
+    struct drm_crtc_state *crtc_state = NULL;
     int ret;
 
     if (IS_ERR(plane_state))

@@ -1162,7 +1162,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
     if (!old_plane_state || !old_plane_state->fb ||
         old_plane_state->src_w != plane_state->src_w ||
         old_plane_state->src_h != plane_state->src_h ||
-        old_plane_state->src_w != plane_state->src_w ||
+        old_plane_state->crtc_w != plane_state->crtc_w ||
         old_plane_state->crtc_h != plane_state->crtc_h ||
         msm_framebuffer_format(old_plane_state->fb) !=
         msm_framebuffer_format(plane_state->fb))

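Editorial note: the "mode comparison" fix above is a classic copy-paste slip;
src_w was compared twice and crtc_w not at all, so a change that only affected
the output width could slip past the check. One way to make that class of bug
harder to write is a table-driven comparison; a hypothetical sketch, not the
driver's code:

#include <stdbool.h>
#include <stddef.h>

struct geom { unsigned src_w, src_h, crtc_w, crtc_h; };

/* Each field is listed exactly once, so none can be duplicated or omitted. */
static bool geom_changed(const struct geom *a, const struct geom *b)
{
    static const size_t offs[] = {
        offsetof(struct geom, src_w),  offsetof(struct geom, src_h),
        offsetof(struct geom, crtc_w), offsetof(struct geom, crtc_h),
    };
    for (size_t i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
        unsigned va = *(const unsigned *)((const char *)a + offs[i]);
        unsigned vb = *(const unsigned *)((const char *)b + offs[i]);
        if (va != vb)
            return true;
    }
    return false;
}
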
@@ -5,6 +5,8 @@
 
 #include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
 #include <linux/pm_runtime.h>
 #include <dt-bindings/phy/phy.h>
 
 #include "dsi_phy.h"

@@ -511,30 +513,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
     return 0;
 }
 
-static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
-{
-    struct device *dev = &phy->pdev->dev;
-    int ret;
-
-    ret = pm_runtime_resume_and_get(dev);
-    if (ret)
-        return ret;
-
-    ret = clk_prepare_enable(phy->ahb_clk);
-    if (ret) {
-        DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
-        pm_runtime_put_sync(dev);
-    }
-
-    return ret;
-}
-
-static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
-{
-    clk_disable_unprepare(phy->ahb_clk);
-    pm_runtime_put(&phy->pdev->dev);
-}
-
 static const struct of_device_id dsi_phy_dt_match[] = {
 #ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
     { .compatible = "qcom,dsi-phy-28nm-hpm",

@@ -698,22 +676,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
     if (ret)
         return ret;
 
-    phy->ahb_clk = msm_clk_get(pdev, "iface");
-    if (IS_ERR(phy->ahb_clk))
-        return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
-                     "Unable to get ahb clk\n");
+    platform_set_drvdata(pdev, phy);
 
-    ret = devm_pm_runtime_enable(&pdev->dev);
+    ret = devm_pm_runtime_enable(dev);
     if (ret)
         return ret;
 
-    /* PLL init will call into clk_register which requires
-     * register access, so we need to enable power and ahb clock.
-     */
-    ret = dsi_phy_enable_resource(phy);
+    ret = devm_pm_clk_create(dev);
     if (ret)
         return ret;
 
+    ret = pm_clk_add(dev, "iface");
+    if (ret < 0)
+        return dev_err_probe(dev, ret, "Unable to get iface clk\n");
+
     if (phy->cfg->ops.pll_init) {
         ret = phy->cfg->ops.pll_init(phy);
         if (ret)

@@ -727,18 +703,19 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
         return dev_err_probe(dev, ret,
                      "Failed to register clk provider\n");
 
-    dsi_phy_disable_resource(phy);
-
-    platform_set_drvdata(pdev, phy);
-
     return 0;
 }
 
+static const struct dev_pm_ops dsi_phy_pm_ops = {
+    SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
 static struct platform_driver dsi_phy_platform_driver = {
     .probe = dsi_phy_driver_probe,
     .driver = {
         .name = "msm_dsi_phy",
         .of_match_table = dsi_phy_dt_match,
+        .pm = &dsi_phy_pm_ops,
     },
 };

@@ -764,9 +741,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
 
     dev = &phy->pdev->dev;
 
-    ret = dsi_phy_enable_resource(phy);
+    ret = pm_runtime_resume_and_get(dev);
     if (ret) {
-        DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+        DRM_DEV_ERROR(dev, "%s: resume failed, %d\n",
             __func__, ret);
         goto res_en_fail;
     }

@@ -810,7 +787,7 @@ pll_restor_fail:
 phy_en_fail:
     regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
 reg_en_fail:
-    dsi_phy_disable_resource(phy);
+    pm_runtime_put(dev);
 res_en_fail:
     return ret;
 }

@@ -823,7 +800,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
     phy->cfg->ops.disable(phy);
 
     regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
-    dsi_phy_disable_resource(phy);
+    pm_runtime_put(&phy->pdev->dev);
 }
 
 void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,

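Editorial note: the DSI PHY hunks above drop the hand-rolled ahb_clk handling
in favor of the pm_clk framework; the clock is registered once at probe and is
then enabled/disabled by runtime PM itself, which is what resolves the 14nm
PLL locking issue named in the pull message. A minimal sketch of that pattern
for a generic platform driver (device name and probe body are assumptions):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    int ret;

    ret = devm_pm_runtime_enable(dev);
    if (ret)
        return ret;

    /* Let the PM core own the clock: no struct clk * is kept around. */
    ret = devm_pm_clk_create(dev);
    if (ret)
        return ret;

    ret = pm_clk_add(dev, "iface");
    if (ret < 0)
        return dev_err_probe(dev, ret, "Unable to get iface clk\n");

    /* From here on, pm_runtime_resume_and_get() implies the clock is on. */
    return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
    SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
};

static struct platform_driver demo_driver = {
    .probe = demo_probe,
    .driver = {
        .name = "demo",
        .pm = &demo_pm_ops,
    },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");
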
@@ -104,7 +104,6 @@ struct msm_dsi_phy {
     phys_addr_t lane_size;
     int id;
 
-    struct clk *ahb_clk;
     struct regulator_bulk_data *supplies;
 
     struct msm_dsi_dphy_timing timing;

@@ -325,25 +325,28 @@ static struct drm_info_list msm_debugfs_list[] = {
 
 static int late_init_minor(struct drm_minor *minor)
 {
-    struct drm_device *dev = minor->dev;
-    struct msm_drm_private *priv = dev->dev_private;
+    struct drm_device *dev;
+    struct msm_drm_private *priv;
     int ret;
 
     if (!minor)
         return 0;
 
+    dev = minor->dev;
+    priv = dev->dev_private;
+
     if (!priv->gpu_pdev)
         return 0;
 
     ret = msm_rd_debugfs_init(minor);
     if (ret) {
-        DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
+        DRM_DEV_ERROR(dev->dev, "could not install rd debugfs\n");
         return ret;
     }
 
     ret = msm_perf_debugfs_init(minor);
     if (ret) {
-        DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
+        DRM_DEV_ERROR(dev->dev, "could not install perf debugfs\n");
         return ret;
     }
 

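Editorial note: the bug fixed above is that the old code dereferenced minor in
the declaration initializers, before the if (!minor) guard ever ran, so the
guard could not help. A tiny userspace reproduction of why initializer order
matters:

#include <stdio.h>

struct minor { int id; };

/* Broken shape: p->id is evaluated before the NULL check. */
static int broken(struct minor *p)
{
    int id = p->id;          /* crashes here when p == NULL */
    if (!p)
        return 0;            /* never reached in the NULL case */
    return id;
}

/* Fixed shape: declare first, check, then dereference. */
static int fixed(struct minor *p)
{
    int id;

    if (!p)
        return 0;
    id = p->id;
    return id;
}

int main(void)
{
    printf("%d\n", fixed(NULL));   /* fine */
    printf("%d\n", broken(NULL));  /* undefined behaviour, likely a segfault */
    return 0;
}
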
@@ -95,7 +95,6 @@ void msm_gem_vma_get(struct drm_gem_object *obj)
 void msm_gem_vma_put(struct drm_gem_object *obj)
 {
     struct msm_drm_private *priv = obj->dev->dev_private;
-    struct drm_exec exec;
 
     if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
         return;

@@ -103,9 +102,13 @@ void msm_gem_vma_put(struct drm_gem_object *obj)
     if (!priv->kms)
         return;
 
+#ifdef CONFIG_DRM_MSM_KMS
+    struct drm_exec exec;
+
     msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
     put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
     drm_exec_fini(&exec);     /* drop locks */
+#endif
 }
 
 /*

@@ -663,9 +666,13 @@ int msm_gem_set_iova(struct drm_gem_object *obj,
 
 static bool is_kms_vm(struct drm_gpuvm *vm)
 {
+#ifdef CONFIG_DRM_MSM_KMS
     struct msm_drm_private *priv = vm->drm->dev_private;
 
     return priv->kms && (priv->kms->vm == vm);
+#else
+    return false;
+#endif
 }
 
 /*

@@ -1113,10 +1120,12 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
         put_pages(obj);
     }
 
-    if (msm_obj->flags & MSM_BO_NO_SHARE) {
+    if (obj->resv != &obj->_resv) {
         struct drm_gem_object *r_obj =
             container_of(obj->resv, struct drm_gem_object, _resv);
 
+        WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
+
         /* Drop reference we hold to shared resv obj: */
         drm_gem_object_put(r_obj);
     }

@@ -100,7 +100,7 @@ struct msm_gem_vm {
      *
      * Only used for kernel managed VMs, unused for user managed VMs.
      *
-     * Protected by @mm_lock.
+     * Protected by vm lock. See msm_gem_lock_vm_and_obj(), for ex.
      */
     struct drm_mm mm;
 

@@ -271,32 +271,37 @@ out:
     return ret;
 }
 
+static int submit_lock_objects_vmbind(struct msm_gem_submit *submit)
+{
+    unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES;
+    struct drm_exec *exec = &submit->exec;
+    int ret = 0;
+
+    drm_exec_init(&submit->exec, flags, submit->nr_bos);
+
+    drm_exec_until_all_locked (&submit->exec) {
+        ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
+        drm_exec_retry_on_contention(exec);
+        if (ret)
+            break;
+
+        ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
+        drm_exec_retry_on_contention(exec);
+        if (ret)
+            break;
+    }
+
+    return ret;
+}
+
 /* This is where we make sure all the bo's are reserved and pin'd: */
 static int submit_lock_objects(struct msm_gem_submit *submit)
 {
     unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
-    struct drm_exec *exec = &submit->exec;
-    int ret;
+    int ret = 0;
 
-    if (msm_context_is_vmbind(submit->queue->ctx)) {
-        flags |= DRM_EXEC_IGNORE_DUPLICATES;
-
-        drm_exec_init(&submit->exec, flags, submit->nr_bos);
-
-        drm_exec_until_all_locked (&submit->exec) {
-            ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
-            drm_exec_retry_on_contention(exec);
-            if (ret)
-                return ret;
-
-            ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
-            drm_exec_retry_on_contention(exec);
-            if (ret)
-                return ret;
-        }
-
-        return 0;
-    }
+    if (msm_context_is_vmbind(submit->queue->ctx))
+        return submit_lock_objects_vmbind(submit);
 
     drm_exec_init(&submit->exec, flags, submit->nr_bos);
 

@@ -305,17 +310,17 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
                       drm_gpuvm_resv_obj(submit->vm));
         drm_exec_retry_on_contention(&submit->exec);
         if (ret)
-            return ret;
+            break;
 
         for (unsigned i = 0; i < submit->nr_bos; i++) {
             struct drm_gem_object *obj = submit->bos[i].obj;
             ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
             drm_exec_retry_on_contention(&submit->exec);
             if (ret)
-                return ret;
+                break;
         }
     }
 
-    return 0;
+    return ret;
 }
 
 static int submit_fence_sync(struct msm_gem_submit *submit)

@@ -514,14 +519,15 @@ out:
  */
 static void submit_cleanup(struct msm_gem_submit *submit, bool error)
 {
+    if (error)
+        submit_unpin_objects(submit);
+
     if (submit->exec.objects)
         drm_exec_fini(&submit->exec);
 
-    if (error) {
-        submit_unpin_objects(submit);
-        /* job wasn't enqueued to scheduler, so early retirement: */
+    /* if job wasn't enqueued to scheduler, early retirement: */
+    if (error)
         msm_submit_retire(submit);
-    }
 }
 
 void msm_submit_retire(struct msm_gem_submit *submit)

@@ -769,12 +775,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
     if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
         sync_file = sync_file_create(submit->user_fence);
-        if (!sync_file) {
+        if (!sync_file)
             ret = -ENOMEM;
-        } else {
-            fd_install(out_fence_fd, sync_file->file);
-            args->fence_fd = out_fence_fd;
-        }
     }
 
     if (ret)

@@ -812,10 +814,14 @@ out:
 out_unlock:
     mutex_unlock(&queue->lock);
 out_post_unlock:
-    if (ret && (out_fence_fd >= 0)) {
-        put_unused_fd(out_fence_fd);
+    if (ret) {
+        if (out_fence_fd >= 0)
+            put_unused_fd(out_fence_fd);
+        if (sync_file)
+            fput(sync_file->file);
+    } else if (sync_file) {
+        fd_install(out_fence_fd, sync_file->file);
+        args->fence_fd = out_fence_fd;
     }
 
     if (!IS_ERR_OR_NULL(submit)) {

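Editorial note: both ioctl paths (GEM_SUBMIT above and VM_BIND below) get the
same error-path rework. fd_install() publishes the file to userspace and
cannot be undone, so it has to be the very last step, after every check that
can still fail. A hedged sketch condensing the pattern the two hunks converge
on (the helper name is hypothetical):

#include <linux/file.h>
#include <linux/sync_file.h>

static int publish_fence_fd(struct dma_fence *fence, int *out_fd)
{
    struct sync_file *sync_file;
    int fd = get_unused_fd_flags(O_CLOEXEC);  /* reserved, not yet visible */

    if (fd < 0)
        return fd;

    sync_file = sync_file_create(fence);
    if (!sync_file) {
        put_unused_fd(fd);   /* nothing was published, just unreserve */
        return -ENOMEM;
    }

    /* Point of no return: after fd_install() the fd belongs to userspace. */
    fd_install(fd, sync_file->file);
    *out_fd = fd;
    return 0;
}
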
@@ -319,13 +319,10 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
     mutex_lock(&vm->mmu_lock);
 
     /*
-     * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
+     * NOTE: if not using pgtable preallocation, we cannot hold
      * a lock across map/unmap which is also used in the job_run()
      * path, as this can cause deadlock in job_run() vs shrinker/
      * reclaim.
-     *
-     * Revisit this if we can come up with a scheme to pre-alloc pages
-     * for the pgtable in map/unmap ops.
      */
     ret = vm_map_op(vm, &(struct msm_vm_map_op){
         .iova = vma->va.addr,

@@ -454,6 +451,8 @@ msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
 struct op_arg {
     unsigned flags;
     struct msm_vm_bind_job *job;
+    const struct msm_vm_bind_op *op;
+    bool kept;
 };
 
 static void

@@ -475,14 +474,18 @@ vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
 }
 
 static int
-msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
 {
-    struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+    struct op_arg *arg = _arg;
+    struct msm_vm_bind_job *job = arg->job;
     struct drm_gem_object *obj = op->map.gem.obj;
     struct drm_gpuva *vma;
     struct sg_table *sgt;
     unsigned prot;
 
+    if (arg->kept)
+        return 0;
+
     vma = vma_from_op(arg, &op->map);
     if (WARN_ON(IS_ERR(vma)))
         return PTR_ERR(vma);

@@ -602,15 +605,41 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
 }
 
 static int
-msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
 {
-    struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+    struct op_arg *arg = _arg;
+    struct msm_vm_bind_job *job = arg->job;
     struct drm_gpuva *vma = op->unmap.va;
     struct msm_gem_vma *msm_vma = to_msm_vma(vma);
 
     vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
            vma->va.addr, vma->va.range);
 
+    /*
+     * Detect in-place remap.  Turnip does this to change the vma flags,
+     * in particular MSM_VMA_DUMP.  In this case we want to avoid actually
+     * touching the page tables, as that would require synchronization
+     * against SUBMIT jobs running on the GPU.
+     */
+    if (op->unmap.keep &&
+        (arg->op->op == MSM_VM_BIND_OP_MAP) &&
+        (vma->gem.obj == arg->op->obj) &&
+        (vma->gem.offset == arg->op->obj_offset) &&
+        (vma->va.addr == arg->op->iova) &&
+        (vma->va.range == arg->op->range)) {
+        /* We are only expecting a single in-place unmap+map cb pair: */
+        WARN_ON(arg->kept);
+
+        /* Leave the existing VMA in place, but signal that to the map cb: */
+        arg->kept = true;
+
+        /* Only flags are changing, so update that in-place: */
+        unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
+        vma->flags = orig_flags | arg->flags;
+
+        return 0;
+    }
+
     if (!msm_vma->mapped)
         goto out_close;
 

@@ -1271,6 +1300,7 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
     const struct msm_vm_bind_op *op = &job->ops[i];
     struct op_arg arg = {
         .job = job,
+        .op = op,
     };
 
     switch (op->op) {

@@ -1460,12 +1490,8 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
 
     if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
         sync_file = sync_file_create(job->fence);
-        if (!sync_file) {
+        if (!sync_file)
             ret = -ENOMEM;
-        } else {
-            fd_install(out_fence_fd, sync_file->file);
-            args->fence_fd = out_fence_fd;
-        }
     }
 
     if (ret)

@@ -1494,10 +1520,14 @@ out:
 out_unlock:
     mutex_unlock(&queue->lock);
 out_post_unlock:
-    if (ret && (out_fence_fd >= 0)) {
-        put_unused_fd(out_fence_fd);
+    if (ret) {
+        if (out_fence_fd >= 0)
+            put_unused_fd(out_fence_fd);
+        if (sync_file)
+            fput(sync_file->file);
+    } else if (sync_file) {
+        fd_install(out_fence_fd, sync_file->file);
+        args->fence_fd = out_fence_fd;
     }
 
     if (!IS_ERR_OR_NULL(job)) {

@@ -465,6 +465,7 @@ static void recover_worker(struct kthread_work *work)
     struct msm_gem_submit *submit;
     struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
     char *comm = NULL, *cmd = NULL;
+    struct task_struct *task;
     int i;
 
     mutex_lock(&gpu->lock);

@@ -482,16 +483,20 @@ static void recover_worker(struct kthread_work *work)
 
     /* Increment the fault counts */
     submit->queue->faults++;
-    if (submit->vm) {
+
+    task = get_pid_task(submit->pid, PIDTYPE_PID);
+    if (!task)
+        gpu->global_faults++;
+    else {
         struct msm_gem_vm *vm = to_msm_vm(submit->vm);
 
         vm->faults++;
 
         /*
          * If userspace has opted-in to VM_BIND (and therefore userspace
-         * management of the VM), faults mark the VM as unusuable. This
+         * management of the VM), faults mark the VM as unusable. This
          * matches vulkan expectations (vulkan is the main target for
-         * VM_BIND)
+         * VM_BIND).
          */
         if (!vm->managed)
             msm_gem_vm_unusable(submit->vm);

@@ -553,8 +558,15 @@ static void recover_worker(struct kthread_work *work)
         unsigned long flags;
 
         spin_lock_irqsave(&ring->submit_lock, flags);
-        list_for_each_entry(submit, &ring->submits, node)
+        list_for_each_entry(submit, &ring->submits, node) {
+            /*
+             * If the submit uses an unusable vm make sure
+             * we don't actually run it
+             */
+            if (to_msm_vm(submit->vm)->unusable)
+                submit->nr_cmds = 0;
             gpu->funcs->submit(gpu, submit);
+        }
         spin_unlock_irqrestore(&ring->submit_lock, flags);
     }
 }

@@ -14,7 +14,9 @@
 struct msm_iommu {
     struct msm_mmu base;
     struct iommu_domain *domain;
-    atomic_t pagetables;
+
+    struct mutex init_lock; /* protects pagetables counter and prr_page */
+    int pagetables;
     struct page *prr_page;
 
     struct kmem_cache *pt_cache;

@@ -227,7 +229,8 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
      * If this is the last attached pagetable for the parent,
      * disable TTBR0 in the arm-smmu driver
      */
-    if (atomic_dec_return(&iommu->pagetables) == 0) {
+    mutex_lock(&iommu->init_lock);
+    if (--iommu->pagetables == 0) {
         adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
 
         if (adreno_smmu->set_prr_bit) {

@@ -236,6 +239,7 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
             iommu->prr_page = NULL;
         }
     }
+    mutex_unlock(&iommu->init_lock);
 
     free_io_pgtable_ops(pagetable->pgtbl_ops);
     kfree(pagetable);

@@ -568,9 +572,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
      * If this is the first pagetable that we've allocated, send it back to
      * the arm-smmu driver as a trigger to set up TTBR0
      */
-    if (atomic_inc_return(&iommu->pagetables) == 1) {
+    mutex_lock(&iommu->init_lock);
+    if (iommu->pagetables++ == 0) {
         ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
         if (ret) {
+            iommu->pagetables--;
+            mutex_unlock(&iommu->init_lock);
             free_io_pgtable_ops(pagetable->pgtbl_ops);
             kfree(pagetable);
             return ERR_PTR(ret);

@@ -595,6 +602,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
             adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
         }
     }
+    mutex_unlock(&iommu->init_lock);
 
     /* Needed later for TLB flush */
     pagetable->parent = parent;

@@ -730,7 +738,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
     iommu->domain = domain;
     msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
 
-    atomic_set(&iommu->pagetables, 0);
+    mutex_init(&iommu->init_lock);
 
     ret = iommu_attach_device(iommu->domain, dev);
     if (ret) {

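Editorial note on the pgtable setup/teardown race fixed above: an atomic
counter only makes the increment itself atomic; the TTBR0 setup (and the
prr_page handling) that follows the count check is a separate step, so a
concurrent destroy could tear TTBR0 down while a create was still configuring
it. Serializing the whole check-and-configure sequence under one mutex is the
standard cure. A userspace sketch of the shape of the fix (the names here
mirror the driver loosely and are otherwise hypothetical):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int pagetables;       /* plain int: only touched under init_lock */
static bool ttbr0_set_up;    /* the multi-step, one-time resource */

static void pagetable_create(void)
{
    pthread_mutex_lock(&init_lock);
    if (pagetables++ == 0)
        ttbr0_set_up = true;   /* setup stays atomic with the count */
    pthread_mutex_unlock(&init_lock);
}

static void pagetable_destroy(void)
{
    pthread_mutex_lock(&init_lock);
    if (--pagetables == 0)
        ttbr0_set_up = false;  /* teardown cannot race a concurrent setup */
    pthread_mutex_unlock(&init_lock);
}
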
@@ -275,6 +275,12 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
     if (ret)
         return ret;
 
+    ret = msm_disp_snapshot_init(ddev);
+    if (ret) {
+        DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+        return ret;
+    }
+
     ret = priv->kms_init(ddev);
     if (ret) {
         DRM_DEV_ERROR(dev, "failed to load kms\n");

@@ -327,10 +333,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
         goto err_msm_uninit;
     }
 
-    ret = msm_disp_snapshot_init(ddev);
-    if (ret)
-        DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-
     drm_mode_config_reset(ddev);
 
     return 0;

@@ -423,7 +423,7 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
     if (IS_ERR(msm_mdss->mmio))
         return ERR_CAST(msm_mdss->mmio);
 
-    dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+    dev_dbg(&pdev->dev, "mapped mdss address space @%p\n", msm_mdss->mmio);
 
     ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
     if (ret)

@@ -594,10 +594,14 @@ by a particular renderpass/blit.
 <reg32 offset="0x0600" name="DBGC_CFG_DBGBUS_SEL_A"/>
 <reg32 offset="0x0601" name="DBGC_CFG_DBGBUS_SEL_B"/>
 <reg32 offset="0x0602" name="DBGC_CFG_DBGBUS_SEL_C"/>
-<reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D">
+<reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A6XX">
     <bitfield high="7" low="0" name="PING_INDEX"/>
     <bitfield high="15" low="8" name="PING_BLK_SEL"/>
 </reg32>
+<reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A7XX-">
+    <bitfield high="7" low="0" name="PING_INDEX"/>
+    <bitfield high="24" low="16" name="PING_BLK_SEL"/>
+</reg32>
 <reg32 offset="0x0604" name="DBGC_CFG_DBGBUS_CNTLT">
     <bitfield high="5" low="0" name="TRACEEN"/>
     <bitfield high="14" low="12" name="GRANU"/>

@@ -3796,6 +3800,14 @@ by a particular renderpass/blit.
     <reg32 offset="0x0030" name="CFG_DBGBUS_TRACE_BUF2"/>
 </domain>
 
+<domain name="A7XX_CX_DBGC" width="32">
+    <!-- Bitfields shifted, but otherwise the same: -->
+    <reg32 offset="0x0000" name="CFG_DBGBUS_SEL_A" variants="A7XX-">
+        <bitfield high="7" low="0" name="PING_INDEX"/>
+        <bitfield high="24" low="16" name="PING_BLK_SEL"/>
+    </reg32>
+</domain>
+
 <domain name="A6XX_CX_MISC" width="32" prefix="variant" varset="chip">
     <reg32 offset="0x0001" name="SYSTEM_CACHE_CNTL_0"/>
     <reg32 offset="0x0002" name="SYSTEM_CACHE_CNTL_1"/>

@@ -159,28 +159,28 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
     <bitfield name="RGB_SWAP" low="12" high="14" type="dsi_rgb_swap"/>
 </reg32>
 <reg32 offset="0x00020" name="ACTIVE_H">
-    <bitfield name="START" low="0" high="11" type="uint"/>
-    <bitfield name="END" low="16" high="27" type="uint"/>
+    <bitfield name="START" low="0" high="15" type="uint"/>
+    <bitfield name="END" low="16" high="31" type="uint"/>
 </reg32>
 <reg32 offset="0x00024" name="ACTIVE_V">
-    <bitfield name="START" low="0" high="11" type="uint"/>
-    <bitfield name="END" low="16" high="27" type="uint"/>
+    <bitfield name="START" low="0" high="15" type="uint"/>
+    <bitfield name="END" low="16" high="31" type="uint"/>
 </reg32>
 <reg32 offset="0x00028" name="TOTAL">
-    <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
-    <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+    <bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
+    <bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
 </reg32>
 <reg32 offset="0x0002c" name="ACTIVE_HSYNC">
-    <bitfield name="START" low="0" high="11" type="uint"/>
-    <bitfield name="END" low="16" high="27" type="uint"/>
+    <bitfield name="START" low="0" high="15" type="uint"/>
+    <bitfield name="END" low="16" high="31" type="uint"/>
 </reg32>
 <reg32 offset="0x00030" name="ACTIVE_VSYNC_HPOS">
-    <bitfield name="START" low="0" high="11" type="uint"/>
-    <bitfield name="END" low="16" high="27" type="uint"/>
+    <bitfield name="START" low="0" high="15" type="uint"/>
+    <bitfield name="END" low="16" high="31" type="uint"/>
 </reg32>
 <reg32 offset="0x00034" name="ACTIVE_VSYNC_VPOS">
-    <bitfield name="START" low="0" high="11" type="uint"/>
-    <bitfield name="END" low="16" high="27" type="uint"/>
+    <bitfield name="START" low="0" high="15" type="uint"/>
+    <bitfield name="END" low="16" high="31" type="uint"/>
 </reg32>
 
 <reg32 offset="0x00038" name="CMD_DMA_CTRL">

@@ -209,8 +209,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
     <bitfield name="WORD_COUNT" low="16" high="31" type="uint"/>
 </reg32>
 <reg32 offset="0x00058" name="CMD_MDP_STREAM0_TOTAL">
-    <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
-    <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+    <bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
+    <bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
 </reg32>
 <reg32 offset="0x0005c" name="CMD_MDP_STREAM1_CTRL">
     <bitfield name="DATA_TYPE" low="0" high="5" type="uint"/>

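Editorial note: the two DSI hunks above are purely about field width, matching
the "adjusted width of resolution-related registers" line in the pull message.
A 12-bit START/END/TOTAL field tops out at 2^12 - 1 = 4095 pixels; with the
corrected 16-bit fields the limit becomes 65535. A one-line check of those
limits:

#include <stdio.h>

int main(void)
{
    printf("12-bit max: %u\n", (1u << 12) - 1);  /* 4095  */
    printf("16-bit max: %u\n", (1u << 16) - 1);  /* 65535 */
    return 0;
}
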
@@ -12,6 +12,10 @@
 
 #include <linux/soc/qcom/ubwc.h>
 
+static const struct qcom_ubwc_cfg_data no_ubwc_data = {
+    /* no UBWC, no HBB */
+};
+
 static const struct qcom_ubwc_cfg_data msm8937_data = {
     .ubwc_enc_version = UBWC_1_0,
     .ubwc_dec_version = UBWC_1_0,

@@ -215,12 +219,20 @@ static const struct qcom_ubwc_cfg_data x1e80100_data = {
 };
 
 static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
+    { .compatible = "qcom,apq8016", .data = &no_ubwc_data },
+    { .compatible = "qcom,apq8026", .data = &no_ubwc_data },
+    { .compatible = "qcom,apq8074", .data = &no_ubwc_data },
     { .compatible = "qcom,apq8096", .data = &msm8998_data },
-    { .compatible = "qcom,msm8917", .data = &msm8937_data },
+    { .compatible = "qcom,msm8226", .data = &no_ubwc_data },
+    { .compatible = "qcom,msm8916", .data = &no_ubwc_data },
+    { .compatible = "qcom,msm8917", .data = &no_ubwc_data },
     { .compatible = "qcom,msm8937", .data = &msm8937_data },
+    { .compatible = "qcom,msm8929", .data = &no_ubwc_data },
+    { .compatible = "qcom,msm8939", .data = &no_ubwc_data },
     { .compatible = "qcom,msm8953", .data = &msm8937_data },
-    { .compatible = "qcom,msm8956", .data = &msm8937_data },
-    { .compatible = "qcom,msm8976", .data = &msm8937_data },
+    { .compatible = "qcom,msm8956", .data = &no_ubwc_data },
+    { .compatible = "qcom,msm8974", .data = &no_ubwc_data },
+    { .compatible = "qcom,msm8976", .data = &no_ubwc_data },
     { .compatible = "qcom,msm8996", .data = &msm8998_data },
     { .compatible = "qcom,msm8998", .data = &msm8998_data },
     { .compatible = "qcom,qcm2290", .data = &qcm2290_data, },

@@ -233,7 +245,10 @@ static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
     { .compatible = "qcom,sc7280", .data = &sc7280_data, },
     { .compatible = "qcom,sc8180x", .data = &sc8180x_data, },
     { .compatible = "qcom,sc8280xp", .data = &sc8280xp_data, },
+    { .compatible = "qcom,sda660", .data = &msm8937_data },
+    { .compatible = "qcom,sdm450", .data = &msm8937_data },
     { .compatible = "qcom,sdm630", .data = &msm8937_data },
+    { .compatible = "qcom,sdm632", .data = &msm8937_data },
     { .compatible = "qcom,sdm636", .data = &msm8937_data },
     { .compatible = "qcom,sdm660", .data = &msm8937_data },
     { .compatible = "qcom,sdm670", .data = &sdm670_data, },

@@ -246,6 +261,8 @@ static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
     { .compatible = "qcom,sm6375", .data = &sm6350_data, },
     { .compatible = "qcom,sm7125", .data = &sc7180_data },
     { .compatible = "qcom,sm7150", .data = &sm7150_data, },
+    { .compatible = "qcom,sm7225", .data = &sm6350_data, },
+    { .compatible = "qcom,sm7325", .data = &sc7280_data, },
     { .compatible = "qcom,sm8150", .data = &sm8150_data, },
     { .compatible = "qcom,sm8250", .data = &sm8250_data, },
     { .compatible = "qcom,sm8350", .data = &sm8350_data, },

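Editorial note: the UBWC table is keyed on SoC compatibles rather than on a
specific device node, which is why a missing entry regresses a whole platform
at once (the "fixing regression" item in the pull message). A hedged sketch of
how such a table can be consulted against the machine's root node, using
generic OF APIs (the lookup helper name is hypothetical):

#include <linux/of.h>

/* Hypothetical lookup against a SoC-keyed of_device_id table. */
static const void *soc_cfg_lookup(const struct of_device_id *table)
{
    struct device_node *root = of_find_node_by_path("/");
    const struct of_device_id *match;

    if (!root)
        return NULL;

    match = of_match_node(table, root);  /* match on root "compatible" */
    of_node_put(root);

    return match ? match->data : NULL;   /* NULL: SoC not in the table */
}
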