drm/panthor: Expose size of driver internal BO's over fdinfo
authorAdrián Larumbe <adrian.larumbe@collabora.com>
Thu, 30 Jan 2025 17:28:11 +0000 (17:28 +0000)
committerBoris Brezillon <boris.brezillon@collabora.com>
Fri, 7 Feb 2025 14:23:39 +0000 (15:23 +0100)
This will display the sizes of kernel BO's bound to an open file, which are
otherwise not exposed to UM through a handle.

The sizes recorded are as follows:
 - Per group: suspend buffer, protm-suspend buffer, syncobjs
 - Per queue: ringbuffer, profiling slots, firmware interface
 - For all heaps in all heap pools across all VM's bound to an open file,
 record size of all heap chunks, and for each pool the gpu_contexts BO too.

This does not record the size of FW regions, as these aren't bound to a
specific open file and remain active through the whole life of the driver.

Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Reviewed-by: Mihail Atanassov <mihail.atanassov@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250130172851.941597-4-adrian.larumbe@collabora.com
drivers/gpu/drm/panthor/panthor_drv.c
drivers/gpu/drm/panthor/panthor_heap.c
drivers/gpu/drm/panthor/panthor_heap.h
drivers/gpu/drm/panthor/panthor_mmu.c
drivers/gpu/drm/panthor/panthor_mmu.h
drivers/gpu/drm/panthor/panthor_sched.c
drivers/gpu/drm/panthor/panthor_sched.h

index d5dcd3d1b33a0ec29a1b304f4ee37839bfb1704f..310bb44abe1a8f5d817fa8f3b22e4d9af9931432 100644 (file)
@@ -1457,12 +1457,26 @@ static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
        drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
 }
 
+static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
+{
+       char *drv_name = file->minor->dev->driver->name;
+       struct panthor_file *pfile = file->driver_priv;
+       struct drm_memory_stats stats = {0};
+
+       panthor_fdinfo_gather_group_mem_info(pfile, &stats);
+       panthor_vm_heaps_sizes(pfile, &stats);
+
+       drm_fdinfo_print_size(p, drv_name, "resident", "memory", stats.resident);
+       drm_fdinfo_print_size(p, drv_name, "active", "memory", stats.active);
+}
+
 static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
 {
        struct drm_device *dev = file->minor->dev;
        struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
 
        panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
+       panthor_show_internal_memory_stats(p, file);
 
        drm_show_memory_stats(p, file);
 }
index 3796a9eb22af2f25d8b9500a221b886e0bc18eec..db0285ce581268c579190a82aa2c6bf3a2eb93c7 100644 (file)
@@ -603,3 +603,29 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
 
        panthor_heap_pool_put(pool);
 }
+
+/**
+ * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
+ * @pool: Pool whose total chunk size to calculate.
+ *
+ * This function adds the size of all heap chunks across all heaps in the
+ * argument pool. It also adds the size of the pool's gpu_contexts kernel BO.
+ * It is meant to be used by fdinfo for displaying the size of internal
+ * driver BO's that aren't exposed to userspace through a GEM handle.
+ * Return: Total size, in bytes, of the pool's heap chunks and gpu_contexts BO.
+ */
+size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
+{
+       struct panthor_heap *heap;
+       unsigned long i;
+       size_t size = 0;
+
+       down_read(&pool->lock);
+       xa_for_each(&pool->xa, i, heap)
+               size += heap->chunk_size * heap->chunk_count;
+       up_read(&pool->lock);
+
+       size += pool->gpu_contexts->obj->size;
+
+       return size;
+}
index 25a5f2bba44570fc4f9895dfaa141eb6049bfa66..e3358d4e8edb21be7c6c249bb413762c36283948 100644 (file)
@@ -27,6 +27,8 @@ struct panthor_heap_pool *
 panthor_heap_pool_get(struct panthor_heap_pool *pool);
 void panthor_heap_pool_put(struct panthor_heap_pool *pool);
 
+size_t panthor_heap_pool_size(struct panthor_heap_pool *pool);
+
 int panthor_heap_grow(struct panthor_heap_pool *pool,
                      u64 heap_gpu_va,
                      u32 renderpasses_in_flight,
index 5ce80e2532d53e82f90baeef5c0c92a32ad636cf..0a4e352b5505a96ae14c51fb5f20ebe4bee9f95c 100644 (file)
@@ -1944,6 +1944,39 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
        return pool;
 }
 
+/**
+ * panthor_vm_heaps_sizes() - Calculate size of all heap chunks across all
+ * heaps over all the heap pools in a VM
+ * @pfile: File.
+ * @stats: Memory stats to be updated.
+ *
+ * Calculate the size of all heap chunks across all heap pools in every VM
+ * bound to @pfile. Sizes of VMs with an active address space count as active.
+ */
+void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats)
+{
+       struct panthor_vm *vm;
+       unsigned long i;
+
+       if (!pfile->vms)
+               return;
+
+       xa_lock(&pfile->vms->xa);
+       xa_for_each(&pfile->vms->xa, i, vm) {
+               size_t size = 0;
+
+               mutex_lock(&vm->heaps.lock);
+               if (vm->heaps.pool)
+                       size = panthor_heap_pool_size(vm->heaps.pool);
+               mutex_unlock(&vm->heaps.lock);
+
+               stats->resident += size;
+               if (vm->as.id >= 0)
+                       stats->active += size;
+       }
+       xa_unlock(&pfile->vms->xa);
+}
+
 static u64 mair_to_memattr(u64 mair, bool coherent)
 {
        u64 memattr = 0;
index 8d21e83d8aba1e203df11883122a9e9356787803..fc274637114e5531e9dc6950dbeb8cba6800892a 100644 (file)
@@ -9,6 +9,7 @@
 
 struct drm_exec;
 struct drm_sched_job;
+struct drm_memory_stats;
 struct panthor_gem_object;
 struct panthor_heap_pool;
 struct panthor_vm;
@@ -37,6 +38,8 @@ int panthor_vm_flush_all(struct panthor_vm *vm);
 struct panthor_heap_pool *
 panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
 
+void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats);
+
 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm);
 void panthor_vm_put(struct panthor_vm *vm);
 struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
index 5844a7f639e001cddd393f755e4b5b3516c03386..da87e6e819387decb2a06fa54152a338742aa3c5 100644 (file)
@@ -625,7 +625,7 @@ struct panthor_group {
         */
        struct panthor_kernel_bo *syncobjs;
 
-       /** @fdinfo: Per-file total cycle and timestamp values reference. */
+       /** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */
        struct {
                /** @data: Total sampled values for jobs in queues from this group. */
                struct panthor_gpu_usage data;
@@ -635,6 +635,9 @@ struct panthor_group {
                 * and job post-completion processing function
                 */
                struct mutex lock;
+
+               /** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */
+               size_t kbo_sizes;
        } fdinfo;
 
        /** @state: Group state. */
@@ -3378,6 +3381,29 @@ err_free_queue:
        return ERR_PTR(ret);
 }
 
+static void add_group_kbo_sizes(struct panthor_device *ptdev,
+                               struct panthor_group *group)
+{
+       struct panthor_queue *queue;
+       int i;
+
+       if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
+               return;
+       if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
+               return;
+
+       group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
+       group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
+       group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
+
+       for (i = 0; i < group->queue_count; i++) {
+               queue = group->queues[i];
+               group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
+               group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
+               group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
+       }
+}
+
 #define MAX_GROUPS_PER_POOL            128
 
 int panthor_group_create(struct panthor_file *pfile,
@@ -3502,6 +3528,7 @@ int panthor_group_create(struct panthor_file *pfile,
        }
        mutex_unlock(&sched->reset.lock);
 
+       add_group_kbo_sizes(group->ptdev, group);
        mutex_init(&group->fdinfo.lock);
 
        return gid;
@@ -3621,6 +3648,33 @@ void panthor_group_pool_destroy(struct panthor_file *pfile)
        pfile->groups = NULL;
 }
 
+/**
+ * panthor_fdinfo_gather_group_mem_info() - Retrieve aggregate size of all private kernel BO's
+ * belonging to all the groups owned by an open Panthor file
+ * @pfile: File.
+ * @stats: Memory statistics to be updated.
+ * Sizes of groups currently attached to a CSG slot also count as active.
+ */
+void
+panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
+                                    struct drm_memory_stats *stats)
+{
+       struct panthor_group_pool *gpool = pfile->groups;
+       struct panthor_group *group;
+       unsigned long i;
+
+       if (IS_ERR_OR_NULL(gpool))
+               return;
+
+       xa_lock(&gpool->xa);
+       xa_for_each(&gpool->xa, i, group) {
+               stats->resident += group->fdinfo.kbo_sizes;
+               if (group->csg_id >= 0)
+                       stats->active += group->fdinfo.kbo_sizes;
+       }
+       xa_unlock(&gpool->xa);
+}
+
 static void job_release(struct kref *ref)
 {
        struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
index 5ae6b4bde7c50fd5ef673c55b7280fec47ab4c7b..e650a445cf50707943c82e08598eba6dfec723fc 100644 (file)
@@ -9,6 +9,7 @@ struct dma_fence;
 struct drm_file;
 struct drm_gem_object;
 struct drm_sched_job;
+struct drm_memory_stats;
 struct drm_panthor_group_create;
 struct drm_panthor_queue_create;
 struct drm_panthor_group_get_state;
@@ -36,6 +37,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *job);
 
 int panthor_group_pool_create(struct panthor_file *pfile);
 void panthor_group_pool_destroy(struct panthor_file *pfile);
+void panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
+                                         struct drm_memory_stats *stats);
 
 int panthor_sched_init(struct panthor_device *ptdev);
 void panthor_sched_unplug(struct panthor_device *ptdev);