drm fixes for 6.15-rc2

tests:
 - Clean up struct drm_display_mode in various places
 
 i915:
 - Fix scanline offset for LNL+ and BMG+
 - Fix GVT unterminated-string-initialization build warning
 - Fix DP rate limit when sink doesn't support TPS4
 - Handle GDDR + ECC memory type detection
 - Fix VRR parameter change check
 - Fix fence not released on early probe errors
 - Disable render power gating during live selftests
 
 xe:
 - Add another BMG PCI ID
 - Fix UAFs on migration paths
 - Fix shift-out-of-bounds access on TLB invalidation
 - Ensure ccs_mode is correctly set on gt reset
 - Extend some HW workarounds to Xe3
 - Fix PM runtime get/put on sysfs files
 - Fix u64 division on 32b
 - Fix flickering due to missing L3 invalidations
 - Fix missing error code return
 
 amdgpu:
 - MES FW version caching fixes
 - Only use GTT as a fallback if we already have a backing store
 - dma_buf fix
 - IP discovery fix
 - Replay and PSR with VRR fix
 - DC FP fixes
 - eDP fixes
 - KIQ TLB invalidate fix
 - Enable dmem groups support
 - Allow pinning VRAM dma bufs if imports can do P2P
 - Workload profile fixes
 - Prevent possible division by 0 in fan handling
 
 amdkfd:
 - Queue reset fixes
 
 imagination:
 - Fix overflow
 - Fix use-after-free
 
 ivpu:
 - Fix suspend/resume
 
 nouveau:
 - Do not deref dangling pointer
 
 rockchip:
 - Set DP/HDMI registers correctly
 
 udmabuf:
 - Fix overflow
 
 virtgpu:
 - Set reservation lock on dma-buf import
 - Fix error handling in prepare_fb
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmf4hGMACgkQDHTzWXnE
 hr6c0Q//RlM4+kkTiNaqUQ1NtnzcYF6EaleluckxFKtANTjOshDCaOphDBfZKOVe
 TawxOoXcegMvdY+o7w2IDIaxRPgPQSPvXPzdEQoqEsuAqUE1tRLQMWQ6smhfznSu
 X2tn1O47e0pGwa9KRw98dbXvJdbrfuvTaoyvJIvAlxsiH0goQnhazG8uoVaQRzTM
 27JNdw3HlNP64LSZhoxIolKqDPY2HK63sSFixrlmFl+hTYzFLYUs3F7zo5mg5rdi
 pJI6vGEyJ+X8M40s3V964NBtSIUmpcZQQYAM/0pFuegrzq5Svv8F5iDdmdlGeEqK
 xBe5EA1PlKyOvniJZalwMoNvFvPGF6vZ7bNuwptU2DYpfCGg3GJZRHL0WNca0KmY
 P5VjsJqmRMLPJSKhKNqfJugfcP8hFk4GSk6xw+PRMgQNbeIQHPk8f3UqhSJENsyL
 fBTrhrGgns3514K9KPhIqNZuJEekEMEtiwuJZdYGhxG9GoR9Al8Yv5CpH+7Asx/F
 dZ9sWThxGF82ZV8v8Q2Drw8Mco271dg5WxDrCDTbK3S5tyL8taJX8ktLSfXZ1Tli
 h2pQYfGhgp3enTCV72w6shbck1HsIq7FXDy1DwrngfoKjXv4kEhejotuXX3WSIjk
 EClBp8I9splWb6aO2T4IpHtNj6w72ymarSSSEdpIcSg9HfbQm2M=
 =8Pnf
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2025-04-11-1' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Weekly fixes, as expected it has a bit more in it than probably usual
  for rc2. amdgpu/xe/i915 lead the way with fixes all over for a bunch
  of other drivers. Nothing major stands out from what I can see.

  tests:
   - Clean up struct drm_display_mode in various places

  i915:
   - Fix scanline offset for LNL+ and BMG+
   - Fix GVT unterminated-string-initialization build warning
   - Fix DP rate limit when sink doesn't support TPS4
   - Handle GDDR + ECC memory type detection
   - Fix VRR parameter change check
   - Fix fence not released on early probe errors
   - Disable render power gating during live selftests

  xe:
   - Add another BMG PCI ID
   - Fix UAFs on migration paths
   - Fix shift-out-of-bounds access on TLB invalidation
   - Ensure ccs_mode is correctly set on gt reset
   - Extend some HW workarounds to Xe3
   - Fix PM runtime get/put on sysfs files
   - Fix u64 division on 32b
   - Fix flickering due to missing L3 invalidations
   - Fix missing error code return

  amdgpu:
   - MES FW version caching fixes
   - Only use GTT as a fallback if we already have a backing store
   - dma_buf fix
   - IP discovery fix
   - Replay and PSR with VRR fix
   - DC FP fixes
   - eDP fixes
   - KIQ TLB invalidate fix
   - Enable dmem groups support
   - Allow pinning VRAM dma bufs if imports can do P2P
   - Workload profile fixes
   - Prevent possible division by 0 in fan handling

  amdkfd:
   - Queue reset fixes

  imagination:
   - Fix overflow
   - Fix use-after-free

  ivpu:
   - Fix suspend/resume

  nouveau:
   - Do not deref dangling pointer

  rockchip:
   - Set DP/HDMI registers correctly

  udmabuf:
   - Fix overflow

  virtgpu:
   - Set reservation lock on dma-buf import
   - Fix error handling in prepare_fb"

* tag 'drm-fixes-2025-04-11-1' of https://gitlab.freedesktop.org/drm/kernel: (58 commits)
  drm/rockchip: dw_hdmi_qp: Fix io init for dw_hdmi_qp_rockchip_resume
  drm/rockchip: vop2: Fix interface enable/mux setting of DP1 on rk3588
  drm/amdgpu/mes12: optimize MES pipe FW version fetching
  drm/amd/pm/smu11: Prevent division by zero
  drm/amdgpu: cancel gfx idle work in device suspend for s0ix
  drm/amd/display: pause the workload setting in dm
  drm/amdgpu/pm/swsmu: implement pause workload profile
  drm/amdgpu/pm: add workload profile pause helper
  drm/i915/huc: Fix fence not released on early probe errors
  drm/i915/vrr: Add vrr.vsync_{start, end} in vrr_params_changed
  drm/tests: probe-helper: Fix drm_display_mode memory leak
  drm/tests: modes: Fix drm_display_mode memory leak
  drm/tests: modes: Fix drm_display_mode memory leak
  drm/tests: cmdline: Fix drm_display_mode memory leak
  drm/tests: modeset: Fix drm_display_mode memory leak
  drm/tests: modeset: Fix drm_display_mode memory leak
  drm/tests: helpers: Create kunit helper to destroy a drm_display_mode
  drm/xe: Restore EIO errno return when GuC PC start fails
  drm/xe: Invalidate L3 read-only cachelines for geometry streams too
  drm/xe: avoid plain 64-bit division
  ...
Linus Torvalds 2025-04-10 20:30:06 -07:00
commit 900241a5cc
64 changed files with 543 additions and 197 deletions


@ -332,7 +332,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si
return -EINVAL;
ret = ivpu_rpm_get(vdev);
if (ret)
if (ret < 0)
return ret;
ivpu_pm_trigger_recovery(vdev, "debugfs");
@ -383,7 +383,7 @@ static int dct_active_set(void *data, u64 active_percent)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
if (ret)
if (ret < 0)
return ret;
if (active_percent)


@ -302,7 +302,8 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
struct ivpu_ipc_consumer cons;
int ret;
drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) &&
pm_runtime_enabled(vdev->drm.dev));
ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);


@ -4,6 +4,7 @@
*/
#include <drm/drm_file.h>
#include <linux/pm_runtime.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
@ -44,6 +45,10 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
mutex_lock(&file_priv->ms_lock);
if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
@ -96,6 +101,8 @@ err_free_ms:
kfree(ms);
unlock:
mutex_unlock(&file_priv->ms_lock);
ivpu_rpm_put(vdev);
return ret;
}
@ -160,6 +167,10 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
if (!args->metric_group_mask)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@ -187,6 +198,7 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
unlock:
mutex_unlock(&file_priv->ms_lock);
ivpu_rpm_put(vdev);
return ret;
}
@ -204,11 +216,17 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_metric_streamer_stop *args = data;
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
int ret;
if (!args->metric_group_mask)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
@ -217,6 +235,7 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file
mutex_unlock(&file_priv->ms_lock);
ivpu_rpm_put(vdev);
return ms ? 0 : -EINVAL;
}
@ -281,6 +300,9 @@ unlock:
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
struct ivpu_ms_instance *ms, *tmp;
struct ivpu_device *vdev = file_priv->vdev;
pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&file_priv->ms_lock);
@ -293,6 +315,8 @@ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
free_instance(file_priv, ms);
mutex_unlock(&file_priv->ms_lock);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
void ivpu_ms_cleanup_all(struct ivpu_device *vdev)


@ -393,7 +393,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf)
return -ENOMEM;
pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
pgoff_t subpgcnt;
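
For context on the udmabuf change above: size_limit_mb is an int module parameter, so the old expression multiplied it in 32-bit arithmetic and could wrap before the value was ever widened; the fix promotes the multiplication to 64 bits first. A minimal userspace sketch of the difference (the PAGE_SHIFT value and the 4 GiB limit below are illustrative only, and the variable is made unsigned here so the wrap is well defined):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int size_limit_mb = 4096;	/* e.g. a 4 GiB limit */

	/* 32-bit product wraps to 0 before the shift */
	uint64_t wrapped  = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	/* widened to 64 bits first, as in the fix */
	uint64_t promoted = ((uint64_t)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;

	printf("wrapped=%llu promoted=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)promoted);
	return 0;
}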


@ -353,7 +353,6 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000


@ -3643,6 +3643,13 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev, adev->ip_blocks[i].version->type))
continue;
/* Since we skip suspend for S0i3, we need to cancel the delayed
* idle work here as the suspend callback never gets called.
*/
if (adev->in_s0ix &&
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
cancel_delayed_work_sync(&adev->gfx.idle_work);
/* skip suspend of gfx/mes and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always on hardware


@ -120,6 +120,8 @@ MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3


@ -75,11 +75,25 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
*/
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct dma_buf *dmabuf = attach->dmabuf;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
u32 domains = bo->preferred_domains;
/* pin buffer into GTT */
return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
dma_resv_assert_held(dmabuf->resv);
/*
* Try pinning into VRAM to allow P2P with RDMA NICs without ODP
* support if all attachments can do P2P. If any attachment can't do
* P2P just pin into GTT instead.
*/
list_for_each_entry(attach, &dmabuf->attachments, node)
if (!attach->peer2peer)
domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
if (domains & AMDGPU_GEM_DOMAIN_VRAM)
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
return amdgpu_bo_pin(bo, domains);
}
/**
@ -134,9 +148,6 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return ERR_PTR(r);
} else if (bo->tbo.resource->mem_type != TTM_PL_TT) {
return ERR_PTR(-EBUSY);
}
switch (bo->tbo.resource->mem_type) {
@ -184,7 +195,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
if (sgt->sgl->page_link) {
if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);


@ -699,12 +699,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst)
{
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
int r;
int r, cnt = 0;
uint32_t seq;
/*
@ -761,10 +759,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
!amdgpu_reset_pending(adev->reset_domain)) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
if (cnt > MAX_KIQ_REG_TRY) {
dev_err(adev->dev, "timeout waiting for kiq fence\n");
r = -ETIME;
}
} else
r = 0;
}
error_unlock_reset:


@ -163,8 +163,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
!(adev->flags & AMD_IS_APU))
if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}


@ -24,6 +24,7 @@
#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
@ -907,6 +908,9 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
struct ttm_resource_manager *man = &mgr->manager;
int err;
man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size);
if (IS_ERR(man->cg))
return PTR_ERR(man->cg);
ttm_resource_manager_init(man, &adev->mman.bdev,
adev->gmc.real_vram_size);


@ -894,6 +894,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
{
int pipe;
/* return early if we have already fetched these */
if (adev->mes.sched_version && adev->mes.kiq_version)
return;
/* get MES scheduler/KIQ versions */
mutex_lock(&adev->srbm_mutex);


@ -1392,17 +1392,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
mes_v12_0_queue_init_register(ring);
}
/* get MES scheduler/KIQ versions */
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, 3, pipe, 0, 0);
if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) ||
((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) {
/* get MES scheduler/KIQ versions */
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, 3, pipe, 0, 0);
if (pipe == AMDGPU_MES_SCHED_PIPE)
adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
if (pipe == AMDGPU_MES_SCHED_PIPE)
adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
return 0;
}


@ -1983,9 +1983,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4))
@ -2001,7 +1998,11 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
if (!amdgpu_sriov_vf(dev->gpu->adev))
dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;


@ -1722,6 +1722,13 @@ static const struct dmi_system_id dmi_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
@ -1729,6 +1736,20 @@ static const struct dmi_system_id dmi_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
},
},
{
.callback = edp0_on_dp1_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
},
},
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};


@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
*
* Panel Replay and PSR SU
* - Enable when:
* - VRR is disabled
* - vblank counter is disabled
* - entry is allowed: usermode demonstrates an adequate number of fast
* commits)
@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
bool is_sr_active = (link->replay_settings.replay_allow_active ||
link->psr_settings.psr_allow_active);
bool is_crc_window_active = false;
bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
is_crc_window_active =
amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
#endif
if (link->replay_settings.replay_feature_enabled &&
if (link->replay_settings.replay_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
amdgpu_dm_psr_disable(vblank_work->stream, false);
} else if (link->psr_settings.psr_feature_enabled &&
} else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
struct amdgpu_dm_connector *aconn =
@ -244,6 +246,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
struct amdgpu_device *adev = drm_to_adev(dm->ddev);
int r;
mutex_lock(&dm->dc_lock);
@ -271,8 +275,15 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
if (dm->active_vblank_irq_count == 0)
if (dm->active_vblank_irq_count == 0) {
r = amdgpu_dpm_pause_power_profile(adev, true);
if (r)
dev_warn(adev->dev, "failed to set default power profile mode\n");
dc_allow_idle_optimizations(dm->dc, true);
r = amdgpu_dpm_pause_power_profile(adev, false);
if (r)
dev_warn(adev->dev, "failed to restore the power profile mode\n");
}
mutex_unlock(&dm->dc_lock);


@ -86,6 +86,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
/* Store configuration options */
(*dml_ctx)->config = *config;
DC_FP_START();
/*Initialize SOCBB and DCNIP params */
dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
@ -96,6 +98,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
/*Initialize DML21 instance */
dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
DC_FP_END();
}
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
@ -283,11 +287,16 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
{
bool out = false;
DC_FP_START();
/* Use dml_validate_only for fast_validate path */
if (fast_validate) {
if (fast_validate)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
} else
else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
DC_FP_END();
return out;
}
@ -426,8 +435,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming;
DC_FP_START();
/* need to initialize copied instance for internal references to be correct */
dml2_initialize_instance(&dst_dml_ctx->v21.dml_init);
DC_FP_END();
}
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,


@ -732,11 +732,16 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
return out;
}
DC_FP_START();
/* Use dml_validate_only for fast_validate path */
if (fast_validate)
out = dml2_validate_only(context);
else
out = dml2_validate_and_build_resource(in_dc, context);
DC_FP_END();
return out;
}
@ -779,11 +784,15 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
break;
}
DC_FP_START();
initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
DC_FP_END();
}
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)


@ -429,6 +429,7 @@ struct amd_pm_funcs {
int (*set_pp_table)(void *handle, const char *buf, size_t size);
void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
int (*pause_power_profile)(void *handle, bool pause);
/* export to amdgpu */
struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,


@ -349,6 +349,25 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
return ret;
}
int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
bool pause)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (amdgpu_sriov_vf(adev))
return 0;
if (pp_funcs && pp_funcs->pause_power_profile) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->pause_power_profile(
adev->powerplay.pp_handle, pause);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
uint32_t pstate)
{


@ -410,6 +410,8 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en);
int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
bool pause);
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);


@ -2398,7 +2398,11 @@ static int smu_switch_power_profile(void *handle,
smu_power_profile_mode_get(smu, type);
else
smu_power_profile_mode_put(smu, type);
ret = smu_bump_power_profile_mode(smu, NULL, 0);
/* don't switch the active workload when paused */
if (smu->pause_workload)
ret = 0;
else
ret = smu_bump_power_profile_mode(smu, NULL, 0);
if (ret) {
if (enable)
smu_power_profile_mode_put(smu, type);
@ -2411,6 +2415,35 @@ static int smu_switch_power_profile(void *handle,
return 0;
}
static int smu_pause_power_profile(void *handle,
bool pause)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
int ret;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
smu->pause_workload = pause;
/* force to bootup default profile */
if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
ret = smu->ppt_funcs->set_power_profile_mode(smu,
workload_mask,
NULL,
0);
else
ret = smu_bump_power_profile_mode(smu, NULL, 0);
return ret;
}
return 0;
}
static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
@ -3733,6 +3766,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_pp_table = smu_sys_get_pp_table,
.set_pp_table = smu_sys_set_pp_table,
.switch_power_profile = smu_switch_power_profile,
.pause_power_profile = smu_pause_power_profile,
/* export to amdgpu */
.dispatch_tasks = smu_handle_dpm_task,
.load_firmware = smu_load_microcode,


@ -558,6 +558,7 @@ struct smu_context {
/* asic agnostic workload mask */
uint32_t workload_mask;
bool pause_workload;
/* default/user workload preference */
uint32_t power_profile_mode;
uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];


@ -1204,7 +1204,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
if (speed == 0)
if (!speed || speed > UINT_MAX/8)
return -EINVAL;
/*
* To prevent from possible overheat, some ASICs may have requirement
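
A note on the new upper bound in this hunk: besides rejecting speed == 0 (the "division by zero" fix listed in the pull), it guards the later RPM-to-tach-period conversion, which multiplies the speed by 8 in 32-bit arithmetic. A hedged sketch of that conversion follows; the exact expression lives in smu_v11_0_set_fan_speed_rpm() and the constants here are only illustrative:

#include <limits.h>

/* Hypothetical standalone version of the RPM -> tach-period conversion. */
static unsigned int rpm_to_tach_period(unsigned int crystal_clock_freq,
				       unsigned int speed)
{
	/* reject 0 (divide-by-zero) and anything that would wrap 8 * speed */
	if (!speed || speed > UINT_MAX / 8)
		return 0;

	return 60 * crystal_clock_freq * 10000 / (8 * speed);
}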


@ -244,6 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = 4;
break;
case INTEL_DRAM_GDDR:
case INTEL_DRAM_GDDR_ECC:
qi->channel_width = 32;
break;
default:
@ -398,6 +399,12 @@ static const struct intel_sa_info xe2_hpd_sa_info = {
/* Other values not used by simplified algorithm */
};
static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
.derating = 45,
.deprogbwlimit = 53,
/* Other values not used by simplified algorithm */
};
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@ -740,10 +747,15 @@ static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
const struct dram_info *dram_info = &dev_priv->dram_info;
if (!HAS_DISPLAY(dev_priv))
return;
if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv) &&
dram_info->type == INTEL_DRAM_GDDR_ECC)
xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_ecc_sa_info);
else if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
else if (DISPLAY_VER(dev_priv) >= 14)
tgl_get_bw_info(dev_priv, &mtl_sa_info);


@ -968,7 +968,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}
static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,


@ -172,10 +172,28 @@ int intel_dp_link_symbol_clock(int rate)
static int max_dprx_rate(struct intel_dp *intel_dp)
{
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int max_rate;
return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
max_rate = drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
else
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
/*
* Some broken eDP sinks illegally declare support for
* HBR3 without TPS4, and are unable to produce a stable
* output. Reject HBR3 when TPS4 is not available.
*/
if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
encoder->base.base.id, encoder->base.name);
max_rate = 540000;
}
return max_rate;
}
static int max_dprx_lane_count(struct intel_dp *intel_dp)
@ -4170,6 +4188,9 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
static void
intel_edp_set_sink_rates(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
intel_dp->num_sink_rates = 0;
if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
@ -4180,10 +4201,7 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
sink_rates, sizeof(sink_rates));
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
int val = le16_to_cpu(sink_rates[i]);
if (val == 0)
break;
int rate;
/* Value read multiplied by 200kHz gives the per-lane
* link rate in kHz. The source rates are, however,
@ -4191,7 +4209,24 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
* back to symbols is
* (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
*/
intel_dp->sink_rates[i] = (val * 200) / 10;
rate = le16_to_cpu(sink_rates[i]) * 200 / 10;
if (rate == 0)
break;
/*
* Some broken eDP sinks illegally declare support for
* HBR3 without TPS4, and are unable to produce a stable
* output. Reject HBR3 when TPS4 is not available.
*/
if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
encoder->base.base.id, encoder->base.name);
break;
}
intel_dp->sink_rates[i] = rate;
}
intel_dp->num_sink_rates = i;
}


@ -222,7 +222,9 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
if (DISPLAY_VER(display) == 2)
if (DISPLAY_VER(display) >= 20 || display->platform.battlemage)
return 1;
else if (DISPLAY_VER(display) == 2)
return -1;
else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return 2;


@ -117,21 +117,10 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
/*
* BSpec 52698 - Render powergating must be off.
* FIXME BSpec is outdated, disabling powergating for MTL is just
* temporary wa and should be removed after fixing real cause
* of forcewake timeouts.
*/
if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
pg_enable =
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
else
pg_enable =
GEN9_RENDER_PG_ENABLE |
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
pg_enable =
GEN9_RENDER_PG_ENABLE |
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
for (i = 0; i < I915_MAX_VCS; i++)


@ -317,6 +317,11 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
void intel_huc_fini_late(struct intel_huc *huc)
{
delayed_huc_load_fini(huc);
}
#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
@ -414,12 +419,6 @@ out:
void intel_huc_fini(struct intel_huc *huc)
{
/*
* the fence is initialized in init_early, so we need to clean it up
* even if HuC loading is off.
*/
delayed_huc_load_fini(huc);
if (huc->heci_pkt)
i915_vma_unpin_and_release(&huc->heci_pkt, 0);


@ -55,6 +55,7 @@ struct intel_huc {
int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
void intel_huc_fini_late(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type);


@ -136,6 +136,7 @@ void intel_uc_init_late(struct intel_uc *uc)
void intel_uc_driver_late_release(struct intel_uc *uc)
{
intel_huc_fini_late(&uc->huc);
}
/**


@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
u8 *buf;
struct opregion_header *header;
struct vbt v;
const char opregion_signature[16] = OPREGION_SIGNATURE;
gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
/* emulated opregion with VBT mailbox only */
buf = (u8 *)vgpu_opregion(vgpu)->va;
header = (struct opregion_header *)buf;
memcpy(header->signature, opregion_signature,
sizeof(opregion_signature));
static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1);
memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature));
header->size = 0x8;
header->opregion_ver = 0x02000000;
header->mboxes = MBOX_VBT;


@ -305,6 +305,7 @@ struct drm_i915_private {
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
INTEL_DRAM_GDDR_ECC,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;


@ -23,7 +23,9 @@
#include <linux/random.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_gsc_fw.h"
#include "i915_driver.h"
@ -253,11 +255,27 @@ int i915_mock_selftests(void)
int i915_live_selftests(struct pci_dev *pdev)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
struct intel_uncore *uncore = &i915->uncore;
int err;
u32 pg_enable;
intel_wakeref_t wakeref;
if (!i915_selftest.live)
return 0;
/*
* FIXME Disable render powergating, this is temporary wa and should be removed
* after fixing real cause of forcewake timeouts.
*/
with_intel_runtime_pm(uncore->rpm, wakeref) {
if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 00), IP_VER(12, 74))) {
pg_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE);
if (pg_enable & GEN9_RENDER_PG_ENABLE)
intel_uncore_write_fw(uncore, GEN9_PG_ENABLE,
pg_enable & ~GEN9_RENDER_PG_ENABLE);
}
}
__wait_gsc_proxy_completed(i915);
__wait_gsc_huc_load_completed(i915);


@ -687,6 +687,10 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
dram_info->type = INTEL_DRAM_GDDR;
break;
case 9:
drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
dram_info->type = INTEL_DRAM_GDDR_ECC;
break;
default:
MISSING_CASE(val);
return -EINVAL;


@ -732,7 +732,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
fw_mem->core_data, fw_mem->core_code_alloc_size);
if (err)
goto err_free_fw_core_data_obj;
goto err_free_kdata;
memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
@ -742,10 +742,14 @@ pvr_fw_process(struct pvr_device *pvr_dev)
memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);
/* We're finished with the firmware section memory on the CPU, unmap. */
if (fw_core_data_ptr)
if (fw_core_data_ptr) {
pvr_fw_object_vunmap(fw_mem->core_data_obj);
if (fw_core_code_ptr)
fw_core_data_ptr = NULL;
}
if (fw_core_code_ptr) {
pvr_fw_object_vunmap(fw_mem->core_code_obj);
fw_core_code_ptr = NULL;
}
pvr_fw_object_vunmap(fw_mem->data_obj);
fw_data_ptr = NULL;
pvr_fw_object_vunmap(fw_mem->code_obj);
@ -753,7 +757,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
if (err)
goto err_free_fw_core_data_obj;
goto err_free_kdata;
return 0;
@ -763,13 +767,16 @@ err_free_kdata:
kfree(fw_mem->data);
kfree(fw_mem->code);
err_free_fw_core_data_obj:
if (fw_core_data_ptr)
pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj);
pvr_fw_object_vunmap(fw_mem->core_data_obj);
if (fw_mem->core_data_obj)
pvr_fw_object_destroy(fw_mem->core_data_obj);
err_free_fw_core_code_obj:
if (fw_core_code_ptr)
pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj);
pvr_fw_object_vunmap(fw_mem->core_code_obj);
if (fw_mem->core_code_obj)
pvr_fw_object_destroy(fw_mem->core_code_obj);
err_free_fw_data_obj:
if (fw_data_ptr)
@ -836,6 +843,12 @@ pvr_fw_cleanup(struct pvr_device *pvr_dev)
struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
pvr_fw_fini_fwif_connection_ctl(pvr_dev);
kfree(fw_mem->core_data);
kfree(fw_mem->core_code);
kfree(fw_mem->data);
kfree(fw_mem->code);
if (fw_mem->core_code_obj)
pvr_fw_object_destroy(fw_mem->core_code_obj);
if (fw_mem->core_data_obj)


@ -671,6 +671,13 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
geom_job->paired_job = frag_job;
frag_job->paired_job = geom_job;
/* The geometry job pvr_job structure is used when the fragment
* job is being prepared by the GPU scheduler. Have the fragment
* job hold a reference on the geometry job to prevent it being
* freed until the fragment job has finished with it.
*/
pvr_job_get(geom_job);
/* Skip the fragment job we just paired to the geometry job. */
i++;
}


@ -866,6 +866,10 @@ static void pvr_queue_free_job(struct drm_sched_job *sched_job)
struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
drm_sched_job_cleanup(sched_job);
if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job)
pvr_job_put(job->paired_job);
job->paired_job = NULL;
pvr_job_put(job);
}


@ -144,6 +144,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
if (bo->base.import_attach)
drm_prime_gem_destroy(&bo->base, bo->sg);
/*
* If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it.


@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
}
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
ttm_bo_put(&nvbo->bo);
pm_runtime_mark_last_busy(dev);


@ -94,6 +94,7 @@ struct rockchip_hdmi_qp {
struct gpio_desc *enable_gpio;
struct delayed_work hpd_work;
int port_id;
const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops;
};
struct rockchip_hdmi_qp_ctrl_ops {
@ -461,6 +462,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return -ENODEV;
}
hdmi->ctrl_ops = cfg->ctrl_ops;
hdmi->dev = &pdev->dev;
hdmi->port_id = -ENODEV;
@ -600,27 +602,8 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
u32 val;
val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
regmap_write(hdmi->vo_regmap,
hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
val);
val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
RK3588_SET_HPD_PATH_MASK);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
if (hdmi->port_id)
val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
RK3588_HDMI1_GRANT_SEL);
else
val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
RK3588_HDMI0_GRANT_SEL);
regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
hdmi->ctrl_ops->io_init(hdmi);
dw_hdmi_qp_resume(dev, hdmi->hdmi);


@ -1754,9 +1754,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_DP1:
die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, vp->id);
die &= ~RK3588_SYS_DSP_INFACE_EN_DP1_MUX;
die |= RK3588_SYS_DSP_INFACE_EN_DP1 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP1_MUX, vp->id);
dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
break;


@ -7,8 +7,6 @@ sti-drm-y := \
sti_compositor.o \
sti_crtc.o \
sti_plane.o \
sti_crtc.o \
sti_plane.o \
sti_hdmi.o \
sti_hdmi_tx3g4c28phy.o \
sti_dvo.o \


@ -95,6 +95,9 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
expected_mode = drm_mode_find_dmt(priv->drm, 1920, 1080, 60, false);
KUNIT_ASSERT_NOT_NULL(test, expected_mode);
ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_TRUE(test,
drm_mode_parse_command_line_for_connector(cmdline,
connector,
@ -129,7 +132,8 @@ static void drm_test_pick_cmdline_named(struct kunit *test)
struct drm_device *drm = priv->drm;
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
const struct drm_display_mode *expected_mode, *mode;
const struct drm_display_mode *mode;
struct drm_display_mode *expected_mode;
const char *cmdline = params->cmdline;
int ret;
@ -149,6 +153,9 @@ static void drm_test_pick_cmdline_named(struct kunit *test)
expected_mode = params->func(drm);
KUNIT_ASSERT_NOT_NULL(test, expected_mode);
ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
}


@ -7,6 +7,7 @@
#include <kunit/test.h>
#include <drm/drm_connector.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
static const struct drm_connector no_connector = {};
@ -955,8 +956,15 @@ struct drm_cmdline_tv_option_test {
static void drm_test_cmdline_tv_options(struct kunit *test)
{
const struct drm_cmdline_tv_option_test *params = test->param_value;
const struct drm_display_mode *expected_mode = params->mode_fn(NULL);
struct drm_display_mode *expected_mode;
struct drm_cmdline_mode mode = { };
int ret;
expected_mode = params->mode_fn(NULL);
KUNIT_ASSERT_NOT_NULL(test, expected_mode);
ret = drm_kunit_add_mode_destroy_action(test, expected_mode);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
&no_connector, &mode));


@ -278,6 +278,28 @@ static void kunit_action_drm_mode_destroy(void *ptr)
drm_mode_destroy(NULL, mode);
}
/**
* drm_kunit_add_mode_destroy_action() - Add a drm_destroy_mode kunit action
* @test: The test context object
* @mode: The drm_display_mode to destroy eventually
*
* Registers a kunit action that will destroy the drm_display_mode at
* the end of the test.
*
* If an error occurs, the drm_display_mode will be destroyed.
*
* Returns:
* 0 on success, an error code otherwise.
*/
int drm_kunit_add_mode_destroy_action(struct kunit *test,
struct drm_display_mode *mode)
{
return kunit_add_action_or_reset(test,
kunit_action_drm_mode_destroy,
mode);
}
EXPORT_SYMBOL_GPL(drm_kunit_add_mode_destroy_action);
/**
* drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object


@ -40,6 +40,7 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_NTSC,
@ -47,6 +48,9 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
ret = drm_kunit_add_mode_destroy_action(test, mode);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
@ -70,6 +74,7 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *expected, *mode;
int ret;
expected = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_NTSC,
@ -77,9 +82,15 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, expected);
ret = drm_kunit_add_mode_destroy_action(test, expected);
KUNIT_ASSERT_EQ(test, ret, 0);
mode = drm_mode_analog_ntsc_480i(priv->drm);
KUNIT_ASSERT_NOT_NULL(test, mode);
ret = drm_kunit_add_mode_destroy_action(test, mode);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
}
@ -87,6 +98,7 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_PAL,
@ -94,6 +106,9 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
ret = drm_kunit_add_mode_destroy_action(test, mode);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
@ -117,6 +132,7 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *expected, *mode;
int ret;
expected = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_PAL,
@ -124,9 +140,15 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, expected);
ret = drm_kunit_add_mode_destroy_action(test, expected);
KUNIT_ASSERT_EQ(test, ret, 0);
mode = drm_mode_analog_pal_576i(priv->drm);
KUNIT_ASSERT_NOT_NULL(test, mode);
ret = drm_kunit_add_mode_destroy_action(test, mode);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
}
@ -134,6 +156,7 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
struct drm_display_mode *mode;
int ret;
mode = drm_analog_tv_mode(priv->drm,
DRM_MODE_TV_MODE_MONOCHROME,
@ -141,6 +164,9 @@ static void drm_test_modes_analog_tv_mono_576i(struct kunit *test)
true);
KUNIT_ASSERT_NOT_NULL(test, mode);
ret = drm_kunit_add_mode_destroy_action(test, mode);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);


@ -98,7 +98,7 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
struct drm_display_mode *mode;
const struct drm_display_mode *expected;
struct drm_display_mode *expected;
size_t len;
int ret;
@ -134,6 +134,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
ret = drm_kunit_add_mode_destroy_action(test, expected);
KUNIT_ASSERT_EQ(test, ret, 0);
}
if (params->num_expected_modes >= 2) {
@ -145,6 +148,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
ret = drm_kunit_add_mode_destroy_action(test, expected);
KUNIT_ASSERT_EQ(test, ret, 0);
}
mutex_unlock(&priv->drm->mode_config.mutex);


@ -115,13 +115,14 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
if (!vgdev->has_context_init)
virtio_gpu_create_context(obj->dev, file);
objs = virtio_gpu_array_alloc(1);
if (!objs)
return -ENOMEM;
virtio_gpu_array_add_obj(objs, obj);
if (vfpriv->context_created) {
objs = virtio_gpu_array_alloc(1);
if (!objs)
return -ENOMEM;
virtio_gpu_array_add_obj(objs, obj);
if (vfpriv->ctx_id)
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
}
out_notify:
virtio_gpu_notify(vgdev);


@ -366,12 +366,6 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
obj = new_state->fb->obj[0];
if (obj->import_attach) {
ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
if (ret)
return ret;
}
if (bo->dumb || obj->import_attach) {
vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
vgdev->fence_drv.context,
@ -380,7 +374,21 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return -ENOMEM;
}
if (obj->import_attach) {
ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
if (ret)
goto err_fence;
}
return 0;
err_fence:
if (vgplane_st->fence) {
dma_fence_put(&vgplane_st->fence->f);
vgplane_st->fence = NULL;
}
return ret;
}
static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)


@ -321,6 +321,7 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
obj = &bo->base.base;
obj->resv = buf->resv;
obj->funcs = &virtgpu_gem_dma_buf_funcs;
drm_gem_private_object_init(dev, obj, buf->size);


@ -41,6 +41,7 @@
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
#define PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE BIT(10) /* gen12 */
#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH BIT(9) /* gen12 */
#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29)


@ -585,6 +585,7 @@ struct xe_device {
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
INTEL_DRAM_GDDR_ECC,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;


@ -322,6 +322,13 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
return 0;
}
/*
* Ensure that roundup_pow_of_two(length) doesn't overflow.
* Note that roundup_pow_of_two() operates on unsigned long,
* not on u64.
*/
#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
/**
* xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
* address range
@ -346,6 +353,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN 7
u32 action[MAX_TLB_INVALIDATION_LEN];
u64 length = end - start;
int len = 0;
xe_gt_assert(gt, fence);
@ -358,11 +366,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
if (!xe->info.has_range_tlb_invalidation) {
if (!xe->info.has_range_tlb_invalidation ||
length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
} else {
u64 orig_start = start;
u64 length = end - start;
u64 align;
if (length < SZ_4K)
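
The MAX_RANGE_TLB_INVALIDATION_LENGTH cap introduced in this hunk exists because roundup_pow_of_two() operates on unsigned long: on a 32-bit kernel, rounding up a range whose next power of two does not fit in 32 bits ends up shifting by the full word width, which is the shift-out-of-bounds splat this pull fixes. A small userspace sketch of the guard (the ~6 GiB range and the puts() reporting are illustrative only):

#include <stdio.h>

/* Largest power of two an unsigned long can hold -- the same idea as
 * rounddown_pow_of_two(ULONG_MAX) in the hunk above. */
#define MAX_SAFE_LENGTH ((unsigned long)-1 - ((unsigned long)-1 >> 1))

int main(void)
{
	unsigned long long length = 3ULL << 31;	/* a ~6 GiB invalidation range */

	if (length > MAX_SAFE_LENGTH)
		puts("range too large for an unsigned long round-up: fall back to a full TLB invalidation");
	else
		puts("range fits: round it up to a power of two and invalidate just that span");
	return 0;
}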


@ -1070,6 +1070,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
ret = -EIO;
goto out;
}


@ -389,12 +389,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
blit_cctl_val,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
/* Use Fixed slice CCS mode */
{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
RCU_MODE_FIXED_SLICE_CCS_MODE))
},
/* Disable WMTP if HW doesn't support it */
{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
@ -461,6 +455,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
/* Use Fixed slice CCS mode */
{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
RCU_MODE_FIXED_SLICE_CCS_MODE))
},
};
xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);


@ -32,14 +32,61 @@ bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max)
return timeout >= min && timeout <= max;
}
static void kobj_xe_hw_engine_release(struct kobject *kobj)
static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
{
kfree(kobj);
}
static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct xe_device *xe = kobj_to_xe(kobj);
struct kobj_attribute *kattr;
ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->show) {
xe_pm_runtime_get(xe);
ret = kattr->show(kobj, kattr, buf);
xe_pm_runtime_put(xe);
}
return ret;
}
static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf,
size_t count)
{
struct xe_device *xe = kobj_to_xe(kobj);
struct kobj_attribute *kattr;
ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->store) {
xe_pm_runtime_get(xe);
ret = kattr->store(kobj, kattr, buf, count);
xe_pm_runtime_put(xe);
}
return ret;
}
static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
.show = xe_hw_engine_class_sysfs_attr_show,
.store = xe_hw_engine_class_sysfs_attr_store,
};
static const struct kobj_type kobj_xe_hw_engine_type = {
.release = kobj_xe_hw_engine_release,
.sysfs_ops = &kobj_sysfs_ops
.release = xe_hw_engine_sysfs_kobj_release,
.sysfs_ops = &xe_hw_engine_class_sysfs_ops,
};
static const struct kobj_type kobj_xe_hw_engine_type_def = {
.release = xe_hw_engine_sysfs_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
static ssize_t job_timeout_max_store(struct kobject *kobj,
@ -543,7 +590,7 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
if (!kobj)
return -ENOMEM;
kobject_init(kobj, &kobj_xe_hw_engine_type);
kobject_init(kobj, &kobj_xe_hw_engine_type_def);
err = kobject_add(kobj, parent, "%s", ".defaults");
if (err)
goto err_object;
@ -559,57 +606,6 @@ err_object:
return err;
}
static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
{
kfree(kobj);
}
static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct xe_device *xe = kobj_to_xe(kobj);
struct kobj_attribute *kattr;
ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->show) {
xe_pm_runtime_get(xe);
ret = kattr->show(kobj, kattr, buf);
xe_pm_runtime_put(xe);
}
return ret;
}
static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf,
size_t count)
{
struct xe_device *xe = kobj_to_xe(kobj);
struct kobj_attribute *kattr;
ssize_t ret = -EIO;
kattr = container_of(attr, struct kobj_attribute, attr);
if (kattr->store) {
xe_pm_runtime_get(xe);
ret = kattr->store(kobj, kattr, buf, count);
xe_pm_runtime_put(xe);
}
return ret;
}
static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
.show = xe_hw_engine_class_sysfs_attr_show,
.store = xe_hw_engine_class_sysfs_attr_store,
};
static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
.release = xe_hw_engine_sysfs_kobj_release,
.sysfs_ops = &xe_hw_engine_class_sysfs_ops,
};
static void hw_engine_class_sysfs_fini(void *arg)
{
@ -640,7 +636,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
if (!kobj)
return -ENOMEM;
kobject_init(kobj, &xe_hw_engine_sysfs_kobj_type);
kobject_init(kobj, &kobj_xe_hw_engine_type);
err = kobject_add(kobj, gt->sysfs, "engines");
if (err)


@ -1177,7 +1177,7 @@ err:
err_sync:
/* Sync partial copies if any. FIXME: job_mutex? */
if (fence) {
dma_fence_wait(m->fence, false);
dma_fence_wait(fence, false);
dma_fence_put(fence);
}
@ -1547,7 +1547,7 @@ void xe_migrate_wait(struct xe_migrate *m)
static u32 pte_update_cmd_size(u64 size)
{
u32 num_dword;
u64 entries = DIV_ROUND_UP(size, XE_PAGE_SIZE);
u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
/*
@ -1558,7 +1558,7 @@ static u32 pte_update_cmd_size(u64 size)
* 2 dword for the page table's physical location
* 2*n dword for value of pte to fill (each pte entry is 2 dwords)
*/
num_dword = (1 + 2) * DIV_ROUND_UP(entries, 0x1ff);
num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, 0x1ff);
num_dword += entries * 2;
return num_dword;
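
Background on the "avoid plain 64-bit division" change above: dividing a u64 with the raw / operator (which DIV_ROUND_UP expands to) makes 32-bit builds emit a call to libgcc's __udivdi3, which the kernel does not provide, so the link fails; DIV_U64_ROUND_UP() routes the division through div_u64()/do_div() instead. A minimal sketch of the pattern, where bytes_to_pages() is a hypothetical helper rather than part of the driver:

#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/types.h>

/* Count 4 KiB pages in a 64-bit byte size without open-coded u64 division. */
static inline u64 bytes_to_pages(u64 size)
{
	/* DIV_ROUND_UP(size, SZ_4K) would expand to a raw u64 '/' and break
	 * 32-bit links; DIV_U64_ROUND_UP() uses div_u64() underneath. */
	return DIV_U64_ROUND_UP(size, SZ_4K);
}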


@ -137,7 +137,8 @@ emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset,
static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
int i)
{
u32 flags = PIPE_CONTROL_CS_STALL |
u32 flags0 = 0;
u32 flags1 = PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@ -148,11 +149,15 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
PIPE_CONTROL_STORE_DATA_INDEX;
if (invalidate_tlb)
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags1 |= PIPE_CONTROL_TLB_INVALIDATE;
flags &= ~mask_flags;
flags1 &= ~mask_flags;
return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE)
flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE;
return emit_pipe_control(dw, i, flags0, flags1,
LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
}
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,


@ -696,11 +696,14 @@ retry:
list_for_each_entry(block, blocks, link)
block->private = vr;
xe_bo_get(bo);
err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
&bo->devmem_allocation, ctx);
xe_bo_unlock(bo);
if (err)
xe_bo_put(bo); /* Creation ref */
xe_svm_devmem_release(&bo->devmem_allocation);
xe_bo_unlock(bo);
xe_bo_put(bo);
unlock:
mmap_read_unlock(mm);


@ -32,8 +32,10 @@
GRAPHICS_VERSION(3001)
14022293748 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
22019794406 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)


@ -118,6 +118,9 @@ drm_kunit_helper_create_crtc(struct kunit *test,
const struct drm_crtc_funcs *funcs,
const struct drm_crtc_helper_funcs *helper_funcs);
int drm_kunit_add_mode_destroy_action(struct kunit *test,
struct drm_display_mode *mode);
struct drm_display_mode *
drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
u8 video_code);


@ -850,6 +850,7 @@
MACRO__(0xE20C, ## __VA_ARGS__), \
MACRO__(0xE20D, ## __VA_ARGS__), \
MACRO__(0xE210, ## __VA_ARGS__), \
MACRO__(0xE211, ## __VA_ARGS__), \
MACRO__(0xE212, ## __VA_ARGS__), \
MACRO__(0xE215, ## __VA_ARGS__), \
MACRO__(0xE216, ## __VA_ARGS__)