Merge tag 'drm-misc-fixes-2025-07-23' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

drm-misc-fixes for v6.16-rc8/final?:
- Revert all uses of drm_gem_object->dmabuf to drm_gem_object->import_attach->dmabuf.
- Fix amdgpu returning BIOS cluttered VRAM after resume.
- Scheduler hang fix.
- Revert nouveau ioctl fix as it caused regressions.
- Fix null pointer deref in nouveau.
- Fix unnecessary semicolon in ti_sn_bridge_probe.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://lore.kernel.org/r/72235afd-c849-49fe-9cc1-2b1781abdf08@linux.intel.com
commit 337666c522
16 changed files with 101 additions and 38 deletions
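The dma-buf hunks below share one pattern: the revert named first in the tag, going back from obj->dma_buf to obj->import_attach->dmabuf when touching an imported GEM object's buffer. A minimal sketch of the restored access pattern; example_vmap() is a hypothetical helper for illustration, not code from this merge:

static int example_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        /*
         * For an object created from an imported dma-buf,
         * drm_gem_is_imported(obj) is true and import_attach->dmabuf
         * is the exporter's original buffer.
         */
        if (drm_gem_is_imported(obj))
                return dma_buf_vmap(obj->import_attach->dmabuf, map);

        return -EINVAL; /* native objects take a driver-specific path */
}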
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -5193,6 +5193,8 @@ exit:
                 dev->dev->power.disable_depth--;
 #endif
         }
+
+        amdgpu_vram_mgr_clear_reset_blocks(adev);
         adev->in_suspend = false;
 
         if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -154,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
                                   uint64_t start, uint64_t size);
 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                       uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
 
 bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
                             struct ttm_resource *res);
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c

@@ -782,6 +782,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
         return atomic64_read(&mgr->vis_usage);
 }
 
+/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+        struct drm_buddy *mm = &mgr->mm;
+
+        mutex_lock(&mgr->lock);
+        drm_buddy_reset_clear(mm, false);
+        mutex_unlock(&mgr->lock);
+}
+
 /**
  * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
  *
drivers/gpu/drm/bridge/ti-sn65dsi86.c

@@ -1373,7 +1373,7 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
                 regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
                                    HPD_DISABLE, 0);
                 mutex_unlock(&pdata->comms_mutex);
-        };
+        }
 
         drm_bridge_add(&pdata->bridge);
 
drivers/gpu/drm/drm_buddy.c

@@ -404,6 +404,49 @@ drm_get_buddy(struct drm_buddy_block *block)
 }
 EXPORT_SYMBOL(drm_get_buddy);
 
+/**
+ * drm_buddy_reset_clear - reset blocks clear state
+ *
+ * @mm: DRM buddy manager
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on @is_clear value for each block
+ * in the freelist.
+ */
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+{
+        u64 root_size, size, start;
+        unsigned int order;
+        int i;
+
+        size = mm->size;
+        for (i = 0; i < mm->n_roots; ++i) {
+                order = ilog2(size) - ilog2(mm->chunk_size);
+                start = drm_buddy_block_offset(mm->roots[i]);
+                __force_merge(mm, start, start + size, order);
+
+                root_size = mm->chunk_size << order;
+                size -= root_size;
+        }
+
+        for (i = 0; i <= mm->max_order; ++i) {
+                struct drm_buddy_block *block;
+
+                list_for_each_entry_reverse(block, &mm->free_list[i], link) {
+                        if (is_clear != drm_buddy_block_is_clear(block)) {
+                                if (is_clear) {
+                                        mark_cleared(block);
+                                        mm->clear_avail += drm_buddy_block_size(mm, block);
+                                } else {
+                                        clear_reset(block);
+                                        mm->clear_avail -= drm_buddy_block_size(mm, block);
+                                }
+                        }
+                }
+        }
+}
+EXPORT_SYMBOL(drm_buddy_reset_clear);
+
 /**
  * drm_buddy_free_block - free a block
  *
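drm_buddy_reset_clear() first force-merges every root back to its maximum order so the per-order free_list walk reaches all free blocks, then flips each block's clear state. The amdgpu hunks above call it with is_clear == false on resume because VRAM contents are not preserved across suspend, so blocks tracked as already cleared may come back holding BIOS leftovers. A caller sketch, assuming a driver manager that embeds a drm_buddy behind its own lock (the shape amdgpu's VRAM manager uses):

/*
 * After an event that invalidates memory contents (suspend/resume or a
 * device reset), drop the "cleared" accounting so future allocations
 * scrub blocks again instead of treating them as already zeroed.
 */
mutex_lock(&mgr->lock);
drm_buddy_reset_clear(&mgr->mm, false);
mutex_unlock(&mgr->lock);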
drivers/gpu/drm/drm_gem_dma_helper.c

@@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
 
         if (drm_gem_is_imported(gem_obj)) {
                 if (dma_obj->vaddr)
-                        dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map);
+                        dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
                 drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
         } else if (dma_obj->vaddr) {
                 if (dma_obj->map_noncoherent)
drivers/gpu/drm/drm_gem_framebuffer_helper.c

@@ -419,6 +419,7 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap);
 static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
                                         unsigned int num_planes)
 {
+        struct dma_buf_attachment *import_attach;
         struct drm_gem_object *obj;
         int ret;
 
@@ -427,9 +428,10 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
                 obj = drm_gem_fb_get_obj(fb, num_planes);
                 if (!obj)
                         continue;
+                import_attach = obj->import_attach;
                 if (!drm_gem_is_imported(obj))
                         continue;
-                ret = dma_buf_end_cpu_access(obj->dma_buf, dir);
+                ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
                 if (ret)
                         drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n",
                                 ret, num_planes, dir);
 
@@ -452,6 +454,7 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
  */
 int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
 {
+        struct dma_buf_attachment *import_attach;
         struct drm_gem_object *obj;
         unsigned int i;
         int ret;
 
@@ -462,9 +465,10 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
                         ret = -EINVAL;
                         goto err___drm_gem_fb_end_cpu_access;
                 }
+                import_attach = obj->import_attach;
                 if (!drm_gem_is_imported(obj))
                         continue;
-                ret = dma_buf_begin_cpu_access(obj->dma_buf, dir);
+                ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
                 if (ret)
                         goto err___drm_gem_fb_end_cpu_access;
         }
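For drivers consuming these helpers, the usual shape is to bracket CPU access to possibly-imported framebuffer planes with the begin/end pair so the exporter can do cache maintenance. A short usage sketch, not part of the diff:

int ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
        return ret;
/* ... CPU reads of the plane contents, e.g. through a prior vmap ... */
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);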
drivers/gpu/drm/drm_gem_shmem_helper.c

@@ -349,7 +349,7 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
         int ret = 0;
 
         if (drm_gem_is_imported(obj)) {
-                ret = dma_buf_vmap(obj->dma_buf, map);
+                ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
         } else {
                 pgprot_t prot = PAGE_KERNEL;
 
@@ -409,7 +409,7 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
         struct drm_gem_object *obj = &shmem->base;
 
         if (drm_gem_is_imported(obj)) {
-                dma_buf_vunmap(obj->dma_buf, map);
+                dma_buf_vunmap(obj->import_attach->dmabuf, map);
         } else {
                 dma_resv_assert_held(shmem->base.resv);
 
drivers/gpu/drm/drm_prime.c

@@ -453,7 +453,13 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
         }
 
         mutex_lock(&dev->object_name_lock);
-        /* re-export the original imported/exported object */
+        /* re-export the original imported object */
+        if (obj->import_attach) {
+                dmabuf = obj->import_attach->dmabuf;
+                get_dma_buf(dmabuf);
+                goto out_have_obj;
+        }
+
         if (obj->dma_buf) {
                 get_dma_buf(obj->dma_buf);
                 dmabuf = obj->dma_buf;
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c

@@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
         struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
 
         if (etnaviv_obj->vaddr)
-                dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map);
+                dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
 
         /* Don't drop the pages for imported dmabuf, as they are not
          * ours, just free the array we allocated:
 
@@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
 
         lockdep_assert_held(&etnaviv_obj->lock);
 
-        ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map);
+        ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
         if (ret)
                 return NULL;
         return map.vaddr;
drivers/gpu/drm/nouveau/nouveau_drm.c

@@ -1284,9 +1284,6 @@ nouveau_ioctls[] = {
         DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW),
 };
 
-#define DRM_IOCTL_NOUVEAU_NVIF _IOC(_IOC_READ | _IOC_WRITE, DRM_IOCTL_BASE, \
-                                    DRM_COMMAND_BASE + DRM_NOUVEAU_NVIF, 0)
-
 long
 nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 
@@ -1300,10 +1297,14 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                 return ret;
         }
 
-        if ((cmd & ~IOCSIZE_MASK) == DRM_IOCTL_NOUVEAU_NVIF)
+        switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
+        case DRM_NOUVEAU_NVIF:
                 ret = nouveau_abi16_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
-        else
+                break;
+        default:
                 ret = drm_ioctl(file, cmd, arg);
+                break;
+        }
 
         pm_runtime_mark_last_busy(dev->dev);
         pm_runtime_put_autosuspend(dev->dev);
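The revert restores dispatch on the ioctl command number alone rather than comparing the whole command word with the size bits masked out. An illustration of the arithmetic the restored switch performs (macro values from the uapi headers; not code from this merge):

/*
 * Driver-private DRM ioctls start at DRM_COMMAND_BASE (0x40), so
 * stripping the base yields the driver's command enum; the payload
 * size encoded in cmd no longer takes part in the comparison.
 */
unsigned int nr = _IOC_NR(cmd) - DRM_COMMAND_BASE;
bool is_nvif = (nr == DRM_NOUVEAU_NVIF); /* variable-size NVIF transport */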
drivers/gpu/drm/nouveau/nvif/chan.c

@@ -39,6 +39,9 @@ nvif_chan_gpfifo_post(struct nvif_chan *chan)
         const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size;
         const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
 
+        if (!chan->func->gpfifo.post)
+                return 0;
+
         return chan->func->gpfifo.post(chan, gpptr, pbptr);
 }
 
drivers/gpu/drm/scheduler/sched_entity.c

@@ -355,17 +355,6 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
 }
 EXPORT_SYMBOL(drm_sched_entity_destroy);
 
-/* drm_sched_entity_clear_dep - callback to clear the entities dependency */
-static void drm_sched_entity_clear_dep(struct dma_fence *f,
-                                       struct dma_fence_cb *cb)
-{
-        struct drm_sched_entity *entity =
-                container_of(cb, struct drm_sched_entity, cb);
-
-        entity->dependency = NULL;
-        dma_fence_put(f);
-}
-
 /*
  * drm_sched_entity_wakeup - callback to clear the entity's dependency and
  * wake up the scheduler
 
@@ -376,7 +365,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
         struct drm_sched_entity *entity =
                 container_of(cb, struct drm_sched_entity, cb);
 
-        drm_sched_entity_clear_dep(f, cb);
+        entity->dependency = NULL;
+        dma_fence_put(f);
         drm_sched_wakeup(entity->rq->sched);
 }
 
@@ -429,13 +419,6 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
                 fence = dma_fence_get(&s_fence->scheduled);
                 dma_fence_put(entity->dependency);
                 entity->dependency = fence;
-                if (!dma_fence_add_callback(fence, &entity->cb,
-                                            drm_sched_entity_clear_dep))
-                        return true;
-
-                /* Ignore it when it is already scheduled */
-                dma_fence_put(fence);
-                return false;
         }
 
         if (!dma_fence_add_callback(entity->dependency, &entity->cb,
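With drm_sched_entity_clear_dep() removed, the only dependency callback left is drm_sched_entity_wakeup(), so a completing dependency now always wakes the scheduler; the removed variant cleared the dependency without a wakeup, which is the hang the tag refers to. For context, a sketch of how the surviving callback is armed, mirroring the tail of drm_sched_entity_add_dependency_cb() above:

/*
 * Arm the entity's callback on its dependency fence. If the fence has
 * already signaled, dma_fence_add_callback() fails and the reference
 * is dropped at once instead of waiting for a wakeup that would never
 * arrive.
 */
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                            drm_sched_entity_wakeup))
        return true;    /* callback armed; scheduler will be woken */

dma_fence_put(entity->dependency);
return false;           /* fence already signaled */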
drivers/gpu/drm/virtio/virtgpu_prime.c

@@ -204,15 +204,16 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
 {
         struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
         struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+        struct dma_buf_attachment *attach = obj->import_attach;
 
         if (drm_gem_is_imported(obj)) {
-                struct dma_buf *dmabuf = obj->dma_buf;
+                struct dma_buf *dmabuf = attach->dmabuf;
 
                 dma_resv_lock(dmabuf->resv, NULL);
                 virtgpu_dma_buf_unmap(bo);
                 dma_resv_unlock(dmabuf->resv);
 
-                dma_buf_detach(dmabuf, obj->import_attach);
+                dma_buf_detach(dmabuf, attach);
                 dma_buf_put(dmabuf);
         }
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c

@@ -85,10 +85,10 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
         int ret;
 
         if (drm_gem_is_imported(obj)) {
-                ret = dma_buf_vmap(obj->dma_buf, map);
+                ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
                 if (!ret) {
                         if (drm_WARN_ON(obj->dev, map->is_iomem)) {
-                                dma_buf_vunmap(obj->dma_buf, map);
+                                dma_buf_vunmap(obj->import_attach->dmabuf, map);
                                 return -EIO;
                         }
                 }
 
@@ -102,7 +102,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
 static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
 {
         if (drm_gem_is_imported(obj))
-                dma_buf_vunmap(obj->dma_buf, map);
+                dma_buf_vunmap(obj->import_attach->dmabuf, map);
         else
                 drm_gem_ttm_vunmap(obj, map);
 }
include/drm/drm_buddy.h

@@ -160,6 +160,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
                          u64 new_size,
                          struct list_head *blocks);
 
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
+
 void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
 
 void drm_buddy_free_list(struct drm_buddy *mm,