drm/panthor: Expose size of driver internal BO's over fdinfo

This will display the sizes of kernel BO's bound to an open file, which are
otherwise not exposed to userspace through a handle.

The sizes recorded are as follows:
 - Per group: suspend buffer, protm-suspend buffer, syncobjs
 - Per queue: ringbuffer, profiling slots, firmware interface
 - For all heaps in all heap pools across all VM's bound to an open file,
   record the size of all heap chunks, and for each pool the gpu_context BO too.

This does not record the size of FW regions, as these aren't bound to a
specific open file and remain active through the whole life of the driver.
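
With this change, reading /proc/<pid>/fdinfo/<fd> on a Panthor DRM file is
expected to show driver-specific entries alongside the standard drm-*-memory
keys, along these lines (key names follow the driver name passed to
drm_fdinfo_print_size(); sizes are purely illustrative):

  panthor-resident-memory:	12288 KiB
  panthor-active-memory:	8192 KiB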

Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Reviewed-by: Mihail Atanassov <mihail.atanassov@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250130172851.941597-4-adrian.larumbe@collabora.com
Adrián Larumbe, 2025-01-30 17:28:11 +00:00, committed by Boris Brezillon
parent af6c2b7c46
commit 434e5ca5b5
7 changed files with 136 additions and 1 deletion


@@ -1457,12 +1457,26 @@ static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
}

static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
{
	char *drv_name = file->minor->dev->driver->name;
	struct panthor_file *pfile = file->driver_priv;
	struct drm_memory_stats stats = {0};

	panthor_fdinfo_gather_group_mem_info(pfile, &stats);
	panthor_vm_heaps_sizes(pfile, &stats);

	drm_fdinfo_print_size(p, drv_name, "resident", "memory", stats.resident);
	drm_fdinfo_print_size(p, drv_name, "active", "memory", stats.active);
}

static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);

	panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
	panthor_show_internal_memory_stats(p, file);
	drm_show_memory_stats(p, file);
}


@@ -603,3 +603,29 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
	panthor_heap_pool_put(pool);
}

/**
 * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
 * @pool: Pool whose total chunk size to calculate.
 *
 * This function adds up the size of all heap chunks across all heaps in the
 * given pool. It also adds the size of the pool's gpu_contexts kernel BO.
 * It is meant to be used by fdinfo for displaying the size of internal
 * driver BO's that aren't exposed to userspace through a GEM handle.
 */
size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
{
	struct panthor_heap *heap;
	unsigned long i;
	size_t size = 0;

	down_read(&pool->lock);
	xa_for_each(&pool->xa, i, heap)
		size += heap->chunk_size * heap->chunk_count;
	up_read(&pool->lock);

	size += pool->gpu_contexts->obj->size;

	return size;
}


@@ -27,6 +27,8 @@ struct panthor_heap_pool *
panthor_heap_pool_get(struct panthor_heap_pool *pool);
void panthor_heap_pool_put(struct panthor_heap_pool *pool);

size_t panthor_heap_pool_size(struct panthor_heap_pool *pool);

int panthor_heap_grow(struct panthor_heap_pool *pool,
		      u64 heap_gpu_va,
		      u32 renderpasses_in_flight,


@@ -1944,6 +1944,39 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create)
	return pool;
}

/**
 * panthor_vm_heaps_sizes() - Calculate size of all heap chunks across all
 * heaps over all the heap pools of a file's VMs
 * @pfile: File.
 * @stats: Memory stats to be updated.
 *
 * Calculate the size of all heap chunks in all heap pools bound to a file's
 * VMs and add it to the resident memory stats. If a VM is currently active,
 * its size is accounted as active as well.
 */
void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats)
{
	struct panthor_vm *vm;
	unsigned long i;

	if (!pfile->vms)
		return;

	xa_lock(&pfile->vms->xa);
	xa_for_each(&pfile->vms->xa, i, vm) {
		size_t size = 0;

		mutex_lock(&vm->heaps.lock);
		if (vm->heaps.pool)
			size = panthor_heap_pool_size(vm->heaps.pool);
		mutex_unlock(&vm->heaps.lock);

		stats->resident += size;
		if (vm->as.id >= 0)
			stats->active += size;
	}
	xa_unlock(&pfile->vms->xa);
}

static u64 mair_to_memattr(u64 mair, bool coherent)
{
	u64 memattr = 0;


@@ -9,6 +9,7 @@
struct drm_exec;
struct drm_sched_job;
struct drm_memory_stats;
struct panthor_gem_object;
struct panthor_heap_pool;
struct panthor_vm;

@@ -37,6 +38,8 @@ int panthor_vm_flush_all(struct panthor_vm *vm);
struct panthor_heap_pool *
panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);

void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats);

struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
void panthor_vm_put(struct panthor_vm *vm);
struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,


@@ -625,7 +625,7 @@ struct panthor_group {
	 */
	struct panthor_kernel_bo *syncobjs;

-	/** @fdinfo: Per-file total cycle and timestamp values reference. */
+	/** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */
	struct {
		/** @data: Total sampled values for jobs in queues from this group. */
		struct panthor_gpu_usage data;

@@ -635,6 +635,9 @@
		 * and job post-completion processing function
		 */
		struct mutex lock;

		/** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */
		size_t kbo_sizes;
	} fdinfo;

	/** @state: Group state. */
@@ -3378,6 +3381,29 @@ err_free_queue:
	return ERR_PTR(ret);
}

static void add_group_kbo_sizes(struct panthor_device *ptdev,
				struct panthor_group *group)
{
	struct panthor_queue *queue;
	int i;

	if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
		return;

	if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
		return;

	group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
	group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
	group->fdinfo.kbo_sizes += group->syncobjs->obj->size;

	for (i = 0; i < group->queue_count; i++) {
		queue = group->queues[i];
		group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
		group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
		group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
	}
}

#define MAX_GROUPS_PER_POOL 128

int panthor_group_create(struct panthor_file *pfile,
@@ -3502,6 +3528,7 @@ int panthor_group_create(struct panthor_file *pfile,
	}
	mutex_unlock(&sched->reset.lock);

	add_group_kbo_sizes(group->ptdev, group);
	mutex_init(&group->fdinfo.lock);

	return gid;
@@ -3621,6 +3648,33 @@ void panthor_group_pool_destroy(struct panthor_file *pfile)
	pfile->groups = NULL;
}

/**
 * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BO's
 * belonging to all the groups owned by an open Panthor file
 * @pfile: File.
 * @stats: Memory statistics to be updated.
 */
void
panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
				     struct drm_memory_stats *stats)
{
	struct panthor_group_pool *gpool = pfile->groups;
	struct panthor_group *group;
	unsigned long i;

	if (IS_ERR_OR_NULL(gpool))
		return;

	xa_lock(&gpool->xa);
	xa_for_each(&gpool->xa, i, group) {
		stats->resident += group->fdinfo.kbo_sizes;
		if (group->csg_id >= 0)
			stats->active += group->fdinfo.kbo_sizes;
	}
	xa_unlock(&gpool->xa);
}

static void job_release(struct kref *ref)
{
	struct panthor_job *job = container_of(ref, struct panthor_job, refcount);


@@ -9,6 +9,7 @@ struct dma_fence;
struct drm_file;
struct drm_gem_object;
struct drm_sched_job;
struct drm_memory_stats;
struct drm_panthor_group_create;
struct drm_panthor_queue_create;
struct drm_panthor_group_get_state;

@@ -36,6 +37,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
int panthor_group_pool_create(struct panthor_file *pfile);
void panthor_group_pool_destroy(struct panthor_file *pfile);

void panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
					  struct drm_memory_stats *stats);

int panthor_sched_init(struct panthor_device *ptdev);
void panthor_sched_unplug(struct panthor_device *ptdev);
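
As a usage illustration only (not part of this patch; the helper name and the
minimal error handling are assumptions), a userspace program could pick up the
new per-driver keys from fdinfo roughly like this:

/* Dump the "panthor-*-memory" fdinfo keys for an open DRM file descriptor. */
#include <stdio.h>
#include <string.h>

static void dump_panthor_internal_mem(int drm_fd)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", drm_fd);
	f = fopen(path, "r");
	if (!f)
		return;

	while (fgets(line, sizeof(line), f)) {
		/* e.g. "panthor-resident-memory:	12288 KiB" */
		if (!strncmp(line, "panthor-", 8))
			fputs(line, stdout);
	}

	fclose(f);
}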