clear-pkgs-linux-iot-lts2018/0675-drm-i915-Sysfs-interfa...


From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: "Fan, Yugang" <yugang.fan@intel.com>
Date: Mon, 17 Sep 2018 09:50:43 +0800
Subject: [PATCH] drm/i915: Sysfs interface to get GFX shmem usage stats per
process

A new interface is required to provide the following functionality:
1) Client-based detailed information about the distribution of
graphics memory.
2) Information about the sharing of graphics buffers between clients.

The client-based interface also aids in debugging the memory
usage/consumption of each client and in tracking down memory leaks.

With this new interface:
1) In memory-leak scenarios, we can easily zero in on the culprit
client that is unexpectedly holding on to graphics buffers for an
inordinate amount of time.
2) We can get an estimate of the instantaneous memory footprint of
every graphics client.
3) We can trace all the processes sharing a particular graphics buffer.

This patch provides a sysfs interface to achieve the above. Two files
are created in sysfs:
'i915_gem_meminfo' provides a summary of the graphics resources used
by each graphics client.
'i915_gem_objinfo' provides a detailed view of each object created by
individual clients.
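
For illustration only (not part of this patch), a minimal userspace
sketch that dumps the per-client summary; the path assumes the i915
device is the primary DRM minor (card0), under which the gfx_memtrack
directory is created:

    #include <stdio.h>

    int main(void)
    {
        char line[512];
        /* assumed path; adjust the card number for your system */
        FILE *f = fopen("/sys/class/drm/card0/gfx_memtrack/i915_gem_meminfo", "r");

        if (!f) {
            perror("i915_gem_meminfo");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }

In this revision the per-process object details are exposed the same
way, as per-tgid files under the same gfx_memtrack directory.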

Rebased for kernel v4.19 1A (Fan Yugang):
1) Removes the unneeded shmem_inode_info->info_lock per Jeremy's
optimization.
2) Removes the unneeded per-pid object info as a performance
optimization.
3) Adds CONFIG_DRM_I915_MEMTRACK, which depends on
CONFIG_DRM_I915_CAPTURE_ERROR, to pass the kernel config test.

Change-Id: If9705d001b922de3570693b2ad39125babd8e860
Signed-off-by: Sourab Gupta <sourab.gupta@intel.com>
Signed-off-by: Deepak S <deepak.s@intel.com>
Signed-off-by: Hu Beiyuan <beiyuan.hu@intel.com>
Signed-off-by: Mingwei Wang <mingwei.wang@intel.com>
Signed-off-by: Harish Krupo <harish.krupo.kps@intel.com>
Signed-off-by: Jeremy Compostella <jeremy.compostella@intel.com>
Signed-off-by: Yugang Fan
---
drivers/gpu/drm/drm_file.c | 4 +
drivers/gpu/drm/drm_internal.h | 5 +
drivers/gpu/drm/i915/Kconfig | 10 +
drivers/gpu/drm/i915/i915_drv.c | 7 +
drivers/gpu/drm/i915/i915_drv.h | 45 ++
drivers/gpu/drm/i915/i915_gem.c | 954 +++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem_object.h | 8 +
drivers/gpu/drm/i915/i915_gpu_error.c | 25 +-
drivers/gpu/drm/i915/i915_gpu_error.h | 7 +
drivers/gpu/drm/i915/i915_sysfs.c | 311 ++++++++
10 files changed, 1375 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 334addaca9c5..644ae6f9a13e 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -46,6 +46,10 @@
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+EXPORT_SYMBOL(drm_global_mutex);
+#endif
+
/**
* DOC: file operations
*
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 8750f3f02b3f..6b2d7fe6efa9 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -48,6 +48,11 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
struct dma_buf *dma_buf);
/* drm_drv.c */
+
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
+#endif
+
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 19dfbf39f6ca..3d25ace132e7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -84,6 +84,16 @@ config DRM_I915_COMPRESS_ERROR
If in doubt, say "Y".
+config DRM_I915_MEMTRACK
+ bool "Enable shmem usage tracking"
+ depends on DRM_I915_CAPTURE_ERROR
+ default y
+ help
+ This option enables tracking of shmem usage, both as a system-wide
+ summary and per process.
+
+ If in doubt, say "N".
+
config DRM_I915_USERPTR
bool "Always enable userptr support"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5fa2da278670..7a91c1e6974e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1563,6 +1563,10 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ kfree(file_priv->process_name);
+#endif
+
kfree(file_priv);
}
@@ -2925,6 +2929,9 @@ static struct drm_driver driver = {
.lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ .gem_open_object = i915_gem_open_object,
+#endif
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1b61ed91687f..3cd14d171c61 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -81,6 +81,9 @@
#include "i915_scheduler.h"
#include "i915_timeline.h"
#include "i915_vma.h"
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+#include "i915_gpu_error.h"
+#endif
#include "intel_gvt.h"
@@ -336,6 +339,11 @@ struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ char *process_name;
+ struct pid *tgid;
+#endif
+
struct {
spinlock_t lock;
struct list_head request_list;
@@ -354,6 +362,10 @@ struct drm_i915_file_private {
unsigned int bsd_engine;
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ struct bin_attribute *obj_attr;
+#endif
+
/*
* Every context ban increments per client ban score. Also
* hangs in short succession increments ban score. If ban threshold
@@ -1000,6 +1012,10 @@ struct i915_gem_mm {
spinlock_t object_stat_lock;
u64 object_memory;
u32 object_count;
+
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ size_t phys_mem_total;
+#endif
};
#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
@@ -1673,6 +1689,10 @@ struct drm_i915_private {
bool preserve_bios_swizzle;
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ struct kobject memtrack_kobj;
+#endif
+
/* overlay */
struct intel_overlay *overlay;
@@ -2926,6 +2946,11 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
const void *data, size_t size);
+
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+int i915_gem_open_object(struct drm_gem_object *gem, struct drm_file *file);
+#endif
+
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);
@@ -3365,6 +3390,19 @@ u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
unsigned int tiling, unsigned int stride);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+int i915_get_pid_cmdline(struct task_struct *task, char *buffer);
+int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj);
+void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj);
+int i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj,
+ unsigned long addr, bool is_map_gtt,
+ bool is_mutex_locked);
+int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev);
+int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev, struct pid *tgid);
+#endif
+
/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
@@ -3406,6 +3444,13 @@ extern int i915_restore_state(struct drm_i915_private *dev_priv);
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+int i915_gem_create_sysfs_file_entry(struct drm_device *dev,
+ struct drm_file *file);
+void i915_gem_remove_sysfs_file_entry(struct drm_device *dev,
+ struct drm_file *file);
+#endif
+
/* intel_lpe_audio.c */
int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e6100723680c..402db0dac1c4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -47,8 +47,852 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+#include <linux/pid.h>
+#include <linux/async.h>
+#include <linux/sched/mm.h>
+#include "../drm_internal.h"
+#endif
+
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+struct per_file_obj_mem_info {
+ int num_obj;
+ int num_obj_shared;
+ int num_obj_private;
+ int num_obj_gtt_bound;
+ int num_obj_purged;
+ int num_obj_purgeable;
+ int num_obj_allocated;
+ int num_obj_fault_mappable;
+ int num_obj_stolen;
+ size_t gtt_space_allocated_shared;
+ size_t gtt_space_allocated_priv;
+ size_t phys_space_allocated_shared;
+ size_t phys_space_allocated_priv;
+ size_t phys_space_purgeable;
+ size_t phys_space_shared_proportion;
+ size_t fault_mappable_size;
+ size_t stolen_space_allocated;
+ char *process_name;
+};
+
+struct name_entry {
+ struct list_head head;
+ struct drm_hash_item hash_item;
+};
+
+struct pid_stat_entry {
+ struct list_head head;
+ struct list_head namefree;
+ struct drm_open_hash namelist;
+ struct per_file_obj_mem_info stats;
+ struct pid *tgid;
+ int pid_num;
+};
+
+struct drm_i915_obj_virt_addr {
+ struct list_head head;
+ unsigned long user_virt_addr;
+};
+
+struct drm_i915_obj_pid_info {
+ struct list_head head;
+ pid_t tgid;
+ int open_handle_count;
+ struct list_head virt_addr_head;
+};
+
+struct get_obj_stats_buf {
+ struct pid_stat_entry *entry;
+ struct drm_i915_error_state_buf *m;
+};
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+{
+ switch (i915_gem_object_get_tiling(obj)) {
+ default:
+ case I915_TILING_NONE: return " ";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ }
+}
+
+/*
+ * If this mmput call is the last one, it will tear down the mmaps of the
+ * process and call drm_gem_vm_close(), which leads to a deadlock on the
+ * i915 mutex. Instead, schedule the mmput asynchronously here, to avoid
+ * recursive attempts to acquire the i915 mutex.
+ */
+static void async_mmput_func(void *data, async_cookie_t cookie)
+{
+ struct mm_struct *mm = data;
+ mmput(mm);
+}
+
+static void async_mmput(struct mm_struct *mm)
+{
+ async_schedule(async_mmput_func, mm);
+}
+
+int i915_get_pid_cmdline(struct task_struct *task, char *buffer)
+{
+ int res = 0;
+ unsigned int len;
+ struct mm_struct *mm = get_task_mm(task);
+
+ if (!mm)
+ goto out;
+ if (!mm->arg_end)
+ goto out_mm;
+
+ len = mm->arg_end - mm->arg_start;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+ if (res < 0) {
+ async_mmput(mm);
+ return res;
+ }
+
+ if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE)
+ buffer[res-1] = '\0';
+out_mm:
+ async_mmput(mm);
+out:
+ return 0;
+}
+
+static int i915_obj_get_shmem_pages_alloced(struct drm_i915_gem_object *obj)
+{
+ if (obj->base.filp) {
+ struct inode *inode = file_inode(obj->base.filp);
+
+ if (!inode)
+ return 0;
+ return inode->i_mapping->nrpages;
+ }
+ return 0;
+}
+
+int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj)
+{
+ int found = 0;
+ struct drm_i915_obj_pid_info *entry;
+ pid_t current_tgid = task_tgid_nr(current);
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
+ list_for_each_entry(entry, &obj->pid_info, head) {
+ if (entry->tgid == current_tgid) {
+ entry->open_handle_count++;
+ found = 1;
+ break;
+ }
+ }
+ if (found == 0) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry == NULL) {
+ DRM_ERROR("alloc failed\n");
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return -ENOMEM;
+ }
+ entry->tgid = current_tgid;
+ entry->open_handle_count = 1;
+ INIT_LIST_HEAD(&entry->virt_addr_head);
+ list_add_tail(&entry->head, &obj->pid_info);
+ }
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return 0;
+}
+
+void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_obj_pid_info *pid_entry, *pid_next;
+ struct drm_i915_obj_virt_addr *virt_entry, *virt_next;
+
+ list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) {
+ list_for_each_entry_safe(virt_entry,
+ virt_next,
+ &pid_entry->virt_addr_head,
+ head) {
+ list_del(&virt_entry->head);
+ kfree(virt_entry);
+ }
+ list_del(&pid_entry->head);
+ kfree(pid_entry);
+ }
+}
+
+ int
+i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj,
+ unsigned long addr,
+ bool is_map_gtt,
+ bool is_mutex_locked)
+{
+ struct drm_i915_obj_pid_info *pid_entry;
+ pid_t current_tgid = task_tgid_nr(current);
+ int ret = 0, found = 0;
+
+ if (is_map_gtt)
+ addr |= 1;
+
+ if (!is_mutex_locked) {
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry(pid_entry, &obj->pid_info, head) {
+ if (pid_entry->tgid == current_tgid) {
+ struct drm_i915_obj_virt_addr *virt_entry, *new_entry;
+
+ list_for_each_entry(virt_entry,
+ &pid_entry->virt_addr_head,
+ head) {
+ if (virt_entry->user_virt_addr == addr) {
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ break;
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (new_entry == NULL) {
+ DRM_ERROR("alloc failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ new_entry->user_virt_addr = addr;
+ list_add_tail(&new_entry->head,
+ &pid_entry->virt_addr_head);
+ break;
+ }
+ }
+
+out:
+ if (!is_mutex_locked)
+ mutex_unlock(&obj->base.dev->struct_mutex);
+
+ return ret;
+}
+
+static int i915_obj_virt_addr_is_invalid(struct drm_gem_object *obj,
+ struct pid *tgid, unsigned long addr)
+{
+ struct task_struct *task;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ int locked, ret = 0;
+
+ task = get_pid_task(tgid, PIDTYPE_PID);
+ if (task == NULL) {
+ DRM_DEBUG("null task for tgid=%d\n", pid_nr(tgid));
+ return -EINVAL;
+ }
+
+ mm = get_task_mm(task);
+ if (mm == NULL) {
+ DRM_DEBUG("null mm for tgid=%d\n", pid_nr(tgid));
+ ret = -EINVAL;
+ goto out_task;
+ }
+
+ locked = down_read_trylock(&mm->mmap_sem);
+ if (!locked)
+ goto out_mm;
+
+ vma = find_vma(mm, addr);
+ if (vma) {
+ if (addr & 1) { /* mmap_gtt case */
+ if (vma->vm_pgoff*PAGE_SIZE == (unsigned long)
+ drm_vma_node_offset_addr(&obj->vma_node))
+ ret = 0;
+ else
+ ret = -EINVAL;
+ } else { /* mmap case */
+ if (vma->vm_file == obj->filp)
+ ret = 0;
+ else
+ ret = -EINVAL;
+ }
+ } else
+ ret = -EINVAL;
+
+ up_read(&mm->mmap_sem);
+
+out_mm:
+ async_mmput(mm);
+out_task:
+ put_task_struct(task);
+ return ret;
+}
+
+static void i915_obj_pidarray_validate(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = gem_obj->dev;
+ struct drm_i915_obj_virt_addr *virt_entry, *virt_next;
+ struct drm_i915_obj_pid_info *pid_entry, *pid_next;
+ struct drm_file *file;
+ struct drm_i915_file_private *file_priv;
+ struct pid *tgid;
+ int pid_num, present;
+
+ /*
+ * Run a sanity check on pid_array. All entries in pid_array should
+ * be a subset of the drm filelist pid entries.
+ */
+ list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) {
+ if (pid_next == NULL) {
+ DRM_ERROR(
+ "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n",
+ &obj->base, obj->base.size/1024,
+ get_tiling_flag(obj),
+ (obj->userptr.mm != 0) ? "Y" : "N",
+ obj->stolen ? "Y" : "N", obj->base.name,
+ obj->base.handle_count);
+ break;
+ }
+
+ present = 0;
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ file_priv = file->driver_priv;
+ tgid = file_priv->tgid;
+ pid_num = pid_nr(tgid);
+
+ if (pid_num == pid_entry->tgid) {
+ present = 1;
+ break;
+ }
+ }
+ if (present == 0) {
+ DRM_DEBUG("stale_tgid=%d\n", pid_entry->tgid);
+ list_for_each_entry_safe(virt_entry, virt_next,
+ &pid_entry->virt_addr_head,
+ head) {
+ list_del(&virt_entry->head);
+ kfree(virt_entry);
+ }
+ list_del(&pid_entry->head);
+ kfree(pid_entry);
+ } else {
+ /* Validate the virtual address list */
+ struct task_struct *task =
+ get_pid_task(tgid, PIDTYPE_PID);
+ if (task == NULL)
+ continue;
+
+ list_for_each_entry_safe(virt_entry, virt_next,
+ &pid_entry->virt_addr_head,
+ head) {
+ if (i915_obj_virt_addr_is_invalid(gem_obj, tgid,
+ virt_entry->user_virt_addr)) {
+ DRM_DEBUG("stale_addr=%ld\n",
+ virt_entry->user_virt_addr);
+ list_del(&virt_entry->head);
+ kfree(virt_entry);
+ }
+ }
+ put_task_struct(task);
+ }
+ }
+}
+
+static int i915_obj_find_insert_in_hash(struct drm_i915_gem_object *obj,
+ struct pid_stat_entry *pid_entry,
+ bool *found)
+{
+ struct drm_hash_item *hash_item;
+ int ret;
+
+ ret = drm_ht_find_item(&pid_entry->namelist,
+ (unsigned long)&obj->base, &hash_item);
+ /* Not found, insert in hash */
+ if (ret) {
+ struct name_entry *entry =
+ kzalloc(sizeof(*entry), GFP_NOWAIT);
+ if (entry == NULL) {
+ DRM_ERROR("alloc failed\n");
+ return -ENOMEM;
+ }
+ entry->hash_item.key = (unsigned long)&obj->base;
+ drm_ht_insert_item(&pid_entry->namelist,
+ &entry->hash_item);
+ list_add_tail(&entry->head, &pid_entry->namefree);
+ *found = false;
+ } else
+ *found = true;
+
+ return 0;
+}
+
+static int i915_obj_shared_count(struct drm_i915_gem_object *obj,
+ struct pid_stat_entry *pid_entry,
+ bool *discard)
+{
+ struct drm_i915_obj_pid_info *pid_info_entry;
+ int ret, obj_shared_count = 0;
+
+ /*
+ * The object can be shared among different processes by either the
+ * flink or the dma-buf mechanism, leading to a shared count greater
+ * than 1. For objects that are not shared, return a shared count of 1.
+ * Shared dma-buf objects may be external to i915; detect this
+ * condition through the 'import_attach' field.
+ */
+ if (!obj->base.name && !obj->base.dma_buf)
+ return 1;
+ else if (obj->base.import_attach) {
+ /* not our GEM obj */
+ *discard = true;
+ return 0;
+ }
+
+ ret = i915_obj_find_insert_in_hash(obj, pid_entry, discard);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(pid_info_entry, &obj->pid_info, head)
+ obj_shared_count++;
+
+ if (WARN_ON(obj_shared_count == 0))
+ return -EINVAL;
+
+ return obj_shared_count;
+}
+
+ static int
+i915_describe_obj(struct get_obj_stats_buf *obj_stat_buf,
+ struct drm_i915_gem_object *obj)
+{
+ struct pid_stat_entry *pid_entry = obj_stat_buf->entry;
+ struct per_file_obj_mem_info *stats = &pid_entry->stats;
+ int obj_shared_count = 0;
+
+ bool discard = false;
+
+ obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard);
+ if (obj_shared_count < 0)
+ return obj_shared_count;
+
+ if (!discard && !obj->stolen &&
+ (obj->mm.madv != __I915_MADV_PURGED) &&
+ (i915_obj_get_shmem_pages_alloced(obj) != 0)) {
+ if (obj_shared_count > 1)
+ stats->phys_space_shared_proportion +=
+ obj->base.size/obj_shared_count;
+ else
+ stats->phys_space_allocated_priv +=
+ obj->base.size;
+ }
+
+ return 0;
+}
+
+ static int
+i915_drm_gem_obj_info(int id, void *ptr, void *data)
+{
+ struct drm_i915_gem_object *obj = ptr;
+ struct get_obj_stats_buf *obj_stat_buf = data;
+
+ if (obj->pid_info.next == NULL) {
+ DRM_ERROR(
+ "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n",
+ &obj->base, obj->base.size/1024,
+ get_tiling_flag(obj),
+ (obj->userptr.mm != 0) ? "Y" : "N",
+ obj->stolen ? "Y" : "N", obj->base.name,
+ obj->base.handle_count);
+ return 0;
+ }
+
+ return i915_describe_obj(obj_stat_buf, obj);
+}
+
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &o->vma_list, obj_link)
+ if (drm_mm_node_allocated(&vma->node))
+ return true;
+
+ return false;
+}
+
+ static int
+i915_drm_gem_object_per_file_summary(int id, void *ptr, void *data)
+{
+ struct pid_stat_entry *pid_entry = data;
+ struct drm_i915_gem_object *obj = ptr;
+ struct per_file_obj_mem_info *stats = &pid_entry->stats;
+ int obj_shared_count = 0;
+ bool discard = false;
+
+ if (obj->pid_info.next == NULL) {
+ DRM_ERROR(
+ "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n",
+ &obj->base, obj->base.size/1024,
+ get_tiling_flag(obj),
+ (obj->userptr.mm != 0) ? "Y" : "N",
+ obj->stolen ? "Y" : "N", obj->base.name,
+ obj->base.handle_count);
+ return 0;
+ }
+
+ i915_obj_pidarray_validate(&obj->base);
+
+ stats->num_obj++;
+
+ obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard);
+ if (obj_shared_count < 0)
+ return obj_shared_count;
+
+ if (discard)
+ return 0;
+
+ if (obj_shared_count > 1)
+ stats->num_obj_shared++;
+ else
+ stats->num_obj_private++;
+
+ if (i915_gem_obj_bound_any(obj)) {
+ stats->num_obj_gtt_bound++;
+ if (obj_shared_count > 1)
+ stats->gtt_space_allocated_shared += obj->base.size;
+ else
+ stats->gtt_space_allocated_priv += obj->base.size;
+ }
+
+ if (obj->stolen) {
+ stats->num_obj_stolen++;
+ stats->stolen_space_allocated += obj->base.size;
+ } else if (obj->mm.madv == __I915_MADV_PURGED) {
+ stats->num_obj_purged++;
+ } else if (obj->mm.madv == I915_MADV_DONTNEED) {
+ stats->num_obj_purgeable++;
+ stats->num_obj_allocated++;
+ if (i915_obj_get_shmem_pages_alloced(obj) != 0) {
+ stats->phys_space_purgeable += obj->base.size;
+ if (obj_shared_count > 1) {
+ stats->phys_space_allocated_shared +=
+ obj->base.size;
+ stats->phys_space_shared_proportion +=
+ obj->base.size/obj_shared_count;
+ } else
+ stats->phys_space_allocated_priv +=
+ obj->base.size;
+ } else
+ WARN_ON(1);
+ } else if (i915_obj_get_shmem_pages_alloced(obj) != 0) {
+ stats->num_obj_allocated++;
+ if (obj_shared_count > 1) {
+ stats->phys_space_allocated_shared +=
+ obj->base.size;
+ stats->phys_space_shared_proportion +=
+ obj->base.size/obj_shared_count;
+ }
+ else
+ stats->phys_space_allocated_priv += obj->base.size;
+ }
+ return 0;
+}
+
+ static int
+__i915_get_drm_clients_info(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev)
+{
+ struct drm_file *file;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ struct name_entry *entry, *next;
+ struct pid_stat_entry *pid_entry, *temp_entry;
+ struct pid_stat_entry *new_pid_entry, *new_temp_entry;
+ struct list_head per_pid_stats, sorted_pid_stats;
+ int ret = 0;
+ size_t total_shared_prop_space = 0, total_priv_space = 0;
+
+ INIT_LIST_HEAD(&per_pid_stats);
+ INIT_LIST_HEAD(&sorted_pid_stats);
+
+ err_puts(m,
+ "\n\n pid Total Shared Priv Purgeable Alloced SharedPHYsize SharedPHYprop PrivPHYsize PurgeablePHYsize process\n");
+
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ struct pid *tgid;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ int pid_num, found = 0;
+
+ tgid = file_priv->tgid;
+ pid_num = pid_nr(tgid);
+
+ list_for_each_entry(pid_entry, &per_pid_stats, head) {
+ if (pid_entry->pid_num == pid_num) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ struct pid_stat_entry *new_entry =
+ kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (new_entry == NULL) {
+ DRM_ERROR("alloc failed\n");
+ ret = -ENOMEM;
+ break;
+ }
+ new_entry->tgid = tgid;
+ new_entry->pid_num = pid_num;
+ ret = drm_ht_create(&new_entry->namelist,
+ DRM_MAGIC_HASH_ORDER);
+ if (ret) {
+ kfree(new_entry);
+ break;
+ }
+
+ list_add_tail(&new_entry->head, &per_pid_stats);
+ INIT_LIST_HEAD(&new_entry->namefree);
+ new_entry->stats.process_name = file_priv->process_name;
+ pid_entry = new_entry;
+ }
+
+ spin_lock(&file->table_lock);
+ ret = idr_for_each(&file->object_idr,
+ &i915_drm_gem_object_per_file_summary, pid_entry);
+ spin_unlock(&file->table_lock);
+ if (ret)
+ break;
+ }
+
+ list_for_each_entry_safe(pid_entry, temp_entry, &per_pid_stats, head) {
+ if (list_empty(&sorted_pid_stats)) {
+ list_del(&pid_entry->head);
+ list_add_tail(&pid_entry->head, &sorted_pid_stats);
+ continue;
+ }
+
+ list_for_each_entry_safe(new_pid_entry, new_temp_entry,
+ &sorted_pid_stats, head) {
+ int prev_space =
+ pid_entry->stats.phys_space_shared_proportion +
+ pid_entry->stats.phys_space_allocated_priv;
+ int new_space =
+ new_pid_entry->
+ stats.phys_space_shared_proportion +
+ new_pid_entry->stats.phys_space_allocated_priv;
+ if (prev_space > new_space) {
+ list_del(&pid_entry->head);
+ list_add_tail(&pid_entry->head,
+ &new_pid_entry->head);
+ break;
+ }
+ if (list_is_last(&new_pid_entry->head,
+ &sorted_pid_stats)) {
+ list_del(&pid_entry->head);
+ list_add_tail(&pid_entry->head,
+ &sorted_pid_stats);
+ }
+ }
+ }
+
+ list_for_each_entry_safe(pid_entry, temp_entry,
+ &sorted_pid_stats, head) {
+ struct task_struct *task = get_pid_task(pid_entry->tgid,
+ PIDTYPE_PID);
+ err_printf(m,
+ "%5d %6d %6d %6d %9d %8d %14zdK %14zdK %14zdK %14zdK %s",
+ pid_entry->pid_num,
+ pid_entry->stats.num_obj,
+ pid_entry->stats.num_obj_shared,
+ pid_entry->stats.num_obj_private,
+ pid_entry->stats.num_obj_purgeable,
+ pid_entry->stats.num_obj_allocated,
+ pid_entry->stats.phys_space_allocated_shared/1024,
+ pid_entry->stats.phys_space_shared_proportion/1024,
+ pid_entry->stats.phys_space_allocated_priv/1024,
+ pid_entry->stats.phys_space_purgeable/1024,
+ pid_entry->stats.process_name);
+
+ if (task == NULL)
+ err_puts(m, "*\n");
+ else
+ err_puts(m, "\n");
+
+ total_shared_prop_space +=
+ pid_entry->stats.phys_space_shared_proportion/1024;
+ total_priv_space +=
+ pid_entry->stats.phys_space_allocated_priv/1024;
+ list_del(&pid_entry->head);
+
+ list_for_each_entry_safe(entry, next,
+ &pid_entry->namefree, head) {
+ list_del(&entry->head);
+ drm_ht_remove_item(&pid_entry->namelist,
+ &entry->hash_item);
+ kfree(entry);
+ }
+ drm_ht_remove(&pid_entry->namelist);
+ kfree(pid_entry);
+ if (task)
+ put_task_struct(task);
+ }
+
+ err_puts(m,
+ "\t\t\t\t\t\t\t\t--------------\t-------------\t--------\n");
+ err_printf(m,
+ "\t\t\t\t\t\t\t\t%13zdK\t%12zdK\tTotal\n",
+ total_shared_prop_space, total_priv_space);
+
+ err_printf(m, "\nTotal used GFX Shmem Physical space %8zdK\n",
+ dev_priv->mm.phys_mem_total/1024);
+
+ if (ret)
+ return ret;
+ if (m->bytes == 0 && m->err)
+ return m->err;
+
+ return 0;
+}
+
+#define NUM_SPACES 100
+#define INITIAL_SPACES_STR(x) #x
+#define SPACES_STR(x) INITIAL_SPACES_STR(x)
+
+ static int
+__i915_gem_get_obj_info(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev, struct pid *tgid)
+{
+ struct drm_file *file;
+ struct drm_i915_file_private *file_priv_reqd = NULL;
+ int bytes_copy, ret = 0;
+ struct pid_stat_entry pid_entry;
+ struct name_entry *entry, *next;
+
+ pid_entry.stats.phys_space_shared_proportion = 0;
+ pid_entry.stats.phys_space_allocated_priv = 0;
+ pid_entry.tgid = tgid;
+ pid_entry.pid_num = pid_nr(tgid);
+ ret = drm_ht_create(&pid_entry.namelist, DRM_MAGIC_HASH_ORDER);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&pid_entry.namefree);
+
+ /*
+ * Fill the initial few bytes with spaces, so that summary data can be
+ * inserted later on
+ */
+ err_printf(m, "%"SPACES_STR(NUM_SPACES)"s\n", " ");
+
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct get_obj_stats_buf obj_stat_buf;
+
+ obj_stat_buf.entry = &pid_entry;
+ obj_stat_buf.m = m;
+
+ if (file_priv->tgid != tgid)
+ continue;
+
+ file_priv_reqd = file_priv;
+ spin_lock(&file->table_lock);
+ ret = idr_for_each(&file->object_idr,
+ &i915_drm_gem_obj_info, &obj_stat_buf);
+ spin_unlock(&file->table_lock);
+ if (ret)
+ break;
+ }
+
+ if (file_priv_reqd) {
+ int space_remaining;
+
+ /* Reset the bytes counter to buffer beginning */
+ bytes_copy = m->bytes;
+ m->bytes = 0;
+
+ err_printf(m, "\n PID GfxMem Process\n");
+ err_printf(m, "%5d %8zdK ", pid_nr(file_priv_reqd->tgid),
+ (pid_entry.stats.phys_space_shared_proportion +
+ pid_entry.stats.phys_space_allocated_priv)/1024);
+
+ space_remaining = NUM_SPACES - m->bytes - 1;
+ if (strlen(file_priv_reqd->process_name) > space_remaining)
+ file_priv_reqd->process_name[space_remaining] = '\0';
+
+ err_printf(m, "%s\n", file_priv_reqd->process_name);
+
+ /* Reinstate the previous saved value of bytes counter */
+ m->bytes = bytes_copy;
+ } else
+ WARN(1, "drm file corresponding to tgid:%d not found\n",
+ pid_nr(tgid));
+
+ list_for_each_entry_safe(entry, next,
+ &pid_entry.namefree, head) {
+ list_del(&entry->head);
+ drm_ht_remove_item(&pid_entry.namelist,
+ &entry->hash_item);
+ kfree(entry);
+ }
+ drm_ht_remove(&pid_entry.namelist);
+
+ if (ret)
+ return ret;
+ if (m->bytes == 0 && m->err)
+ return m->err;
+ return 0;
+}
+
+int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev)
+{
+ int ret = 0;
+
+ /*
+ * Protect the access to global drm resources such as filelist. Protect
+ * against their removal under our noses, while in use.
+ */
+ mutex_lock(&drm_global_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_unlock(&drm_global_mutex);
+ return ret;
+ }
+
+ ret = __i915_get_drm_clients_info(m, dev);
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&drm_global_mutex);
+
+ return ret;
+}
+
+int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev, struct pid *tgid)
+{
+ int ret = 0;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = __i915_gem_get_obj_info(m, dev, tgid);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+#endif
+
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (obj->cache_dirty)
@@ -1867,6 +2711,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_mmap *args = data;
struct drm_i915_gem_object *obj;
unsigned long addr;
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ int ret;
+#endif
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
@@ -1920,6 +2767,12 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
}
i915_gem_object_put(obj);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ ret = i915_obj_insert_virt_addr(obj, addr, false, false);
+ if (ret)
+ return ret;
+#endif
+
args->addr_ptr = (uint64_t) addr;
return 0;
@@ -2125,6 +2978,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ ret = i915_obj_insert_virt_addr(obj, (unsigned long)area->vm_start, true, true);
+#endif
if (ret)
goto err_fence;
@@ -2382,6 +3238,19 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->mm.madv = __I915_MADV_PURGED;
obj->mm.pages = ERR_PTR(-EFAULT);
+
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ /*
+ * Mark the object as not having backing pages, as the physical space
+ * has been returned to the kernel
+ */
+ if (obj->has_backing_pages == 1) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ dev_priv->mm.phys_mem_total -= obj->base.size;
+ obj->has_backing_pages = 0;
+ }
+#endif
}
/* Try to discard unwanted pages */
@@ -2677,6 +3546,14 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ if (obj->has_backing_pages == 0) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ dev_priv->mm.phys_mem_total += obj->base.size;
+ obj->has_backing_pages = 1;
+ }
+#endif
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -4735,6 +5612,15 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ /*
+ * Mark the object as not having backing pages, as no allocation
+ * for it yet
+ */
+ obj->has_backing_pages = 0;
+ INIT_LIST_HEAD(&obj->pid_info);
+#endif
+
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
@@ -4868,6 +5754,17 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
return atomic_long_read(&obj->base.filp->f_count) == 1;
}
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+int
+i915_gem_open_object(struct drm_gem_object *gem_obj,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ return i915_gem_obj_insert_pid(obj);
+}
+#endif
+
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
@@ -4921,6 +5818,16 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ if (!obj->stolen && (obj->has_backing_pages == 1)) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ dev_priv->mm.phys_mem_total -= obj->base.size;
+ obj->has_backing_pages = 0;
+ }
+ i915_gem_obj_remove_all_pids(obj);
+#endif
+
reservation_object_fini(&obj->__builtin_resv);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(i915, obj->base.size);
@@ -5887,6 +6794,11 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_request *request;
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ i915_gem_remove_sysfs_file_entry(dev, file);
+ put_pid(file_priv->tgid);
+#endif
+
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
@@ -5912,15 +6824,57 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
file_priv->dev_priv = i915;
file_priv->file = file;
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ rcu_read_lock();
+ file_priv->tgid = get_pid(find_vpid(task_tgid_nr(current)));
+ rcu_read_unlock();
+
+ file_priv->process_name = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!file_priv->process_name) {
+ ret = -ENOMEM;
+ goto out_free_file;
+ }
+
+ ret = i915_get_pid_cmdline(current, file_priv->process_name);
+ if (ret)
+ goto out_free_name;
+#endif
+
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_engine = -1;
file_priv->hang_timestamp = jiffies;
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ intel_runtime_pm_get(i915);
+#endif
+
ret = i915_gem_context_open(i915, file);
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ if (ret) {
+ intel_runtime_pm_put(i915);
+ goto out_free_name;
+ }
+ intel_runtime_pm_put(i915);
+
+ ret = i915_gem_create_sysfs_file_entry(&i915->drm, file);
+ if (ret) {
+ i915_gem_context_close(file);
+ goto out_free_name;
+ }
+
+ return 0;
+
+out_free_name:
+ kfree(file_priv->process_name);
+out_free_file:
+ put_pid(file_priv->tgid);
+ kfree(file_priv);
+#else
if (ret)
kfree(file_priv);
+#endif
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 83e5e01fa9ea..338709b6640e 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -147,6 +147,10 @@ struct drm_i915_gem_object {
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
unsigned int cache_dirty:1;
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ unsigned int has_backing_pages:1;
+#endif
+
/**
* @read_domains: Read memory domains.
*
@@ -278,6 +282,10 @@ struct drm_i915_gem_object {
void *gvt_info;
};
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ struct list_head pid_info;
+#endif
+
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index ba24ac698e8b..550359f8c351 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -91,6 +91,13 @@ static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
return true;
}
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+bool i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+ return __i915_error_ok(e);
+}
+#endif
+
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
unsigned len)
{
@@ -162,7 +169,7 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
__i915_error_advance(e, len);
}
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
+void i915_error_puts(struct drm_i915_error_state_buf *e,
const char *str)
{
unsigned len;
@@ -874,6 +881,22 @@ int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+int i915_obj_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+ size_t count)
+{
+ memset(ebuf, 0, sizeof(*ebuf));
+
+ ebuf->buf = kmalloc(count, GFP_KERNEL);
+
+ if (ebuf->buf == NULL)
+ return -ENOMEM;
+
+ ebuf->size = count;
+ return 0;
+}
+#endif
+
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
int page;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 8710fb18ed74..821bed7bd375 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -307,6 +307,13 @@ struct drm_i915_error_state_buf {
};
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+void i915_error_puts(struct drm_i915_error_state_buf *e,
+ const char *str);
+bool i915_error_ok(struct drm_i915_error_state_buf *e);
+int i915_obj_state_buf_init(struct drm_i915_error_state_buf *eb,
+ size_t count);
+#endif
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6bb2b05..4ff644202743 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,10 @@
#include "intel_drv.h"
#include "i915_drv.h"
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+#include "../drm_internal.h"
+#endif
+
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
struct drm_minor *minor = dev_get_drvdata(kdev);
@@ -571,6 +575,284 @@ static void i915_teardown_error_capture(struct device *kdev)
{
sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
+static ssize_t i915_gem_clients_state_read(struct file *filp,
+ struct kobject *memtrack_kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct kobject *kobj = memtrack_kobj->parent;
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state_buf error_str;
+ ssize_t ret_count = 0;
+ int ret;
+
+ ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
+ if (ret)
+ return ret;
+
+ ret = i915_get_drm_clients_info(&error_str, dev);
+ if (ret)
+ goto out;
+
+ ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+ memcpy(buf, error_str.buf, ret_count);
+out:
+ i915_error_state_buf_release(&error_str);
+
+ return ret ?: ret_count;
+}
+
+#define GEM_OBJ_STAT_BUF_SIZE (4*1024) /* 4KB */
+#define GEM_OBJ_STAT_BUF_SIZE_MAX (1024*1024) /* 1MB */
+
+struct i915_gem_file_attr_priv {
+ char tgid_str[16];
+ struct pid *tgid;
+ struct drm_i915_error_state_buf buf;
+};
+
+static ssize_t i915_gem_read_objects(struct file *filp,
+ struct kobject *memtrack_kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct kobject *kobj = memtrack_kobj->parent;
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = dev_to_drm_minor(kdev);
+ struct drm_device *dev = minor->dev;
+ struct i915_gem_file_attr_priv *attr_priv;
+ struct pid *tgid;
+ ssize_t ret_count = 0;
+ long bytes_available;
+ int ret = 0, buf_size = GEM_OBJ_STAT_BUF_SIZE;
+ unsigned long timeout = msecs_to_jiffies(500) + 1;
+
+ /*
+ * A sysfs file entry may be removed while it is being read, and the two
+ * operations can race. The sysfs file removal would have taken
+ * drm_global_mutex and would wait for the read to finish, while the read
+ * is in turn waiting to acquire drm_global_mutex, leading to deadlock.
+ * To avoid this, use mutex_trylock here with a timeout.
+ */
+ while (!mutex_trylock(&drm_global_mutex) && --timeout)
+ schedule_timeout_killable(1);
+ if (timeout == 0) {
+ DRM_DEBUG_DRIVER("Unable to acquire drm global mutex.\n");
+ return -EBUSY;
+ }
+
+ if (!attr || !attr->private) {
+ ret = -EINVAL;
+ DRM_ERROR("attr | attr->private pointer is NULL\n");
+ goto out;
+ }
+ attr_priv = attr->private;
+ tgid = attr_priv->tgid;
+
+ if (off && !attr_priv->buf.buf) {
+ ret = -EINVAL;
+ DRM_ERROR(
+ "Buf not allocated during read with non-zero offset\n");
+ goto out;
+ }
+
+ if (off == 0) {
+retry:
+ if (!attr_priv->buf.buf) {
+ ret = i915_obj_state_buf_init(&attr_priv->buf,
+ buf_size);
+ if (ret) {
+ DRM_ERROR(
+ "obj state buf init failed. buf_size=%d\n",
+ buf_size);
+ goto out;
+ }
+ } else {
+ /* Reset the buf parameters before filling data */
+ attr_priv->buf.pos = 0;
+ attr_priv->buf.bytes = 0;
+ }
+
+ /* Read the gfx device stats */
+ ret = i915_gem_get_obj_info(&attr_priv->buf, dev, tgid);
+ if (ret)
+ goto out;
+
+ ret = i915_error_ok(&attr_priv->buf);
+ if (ret) {
+ ret = 0;
+ goto copy_data;
+ }
+ if (buf_size >= GEM_OBJ_STAT_BUF_SIZE_MAX) {
+ DRM_DEBUG_DRIVER("obj stat buf size limit reached\n");
+ ret = -ENOMEM;
+ goto out;
+ } else {
+ /* Try to reallocate buf of larger size */
+ i915_error_state_buf_release(&attr_priv->buf);
+ buf_size *= 2;
+
+ ret = i915_obj_state_buf_init(&attr_priv->buf,
+ buf_size);
+ if (ret) {
+ DRM_ERROR(
+ "obj stat buf init failed. buf_size=%d\n",
+ buf_size);
+ goto out;
+ }
+ goto retry;
+ }
+ }
+copy_data:
+
+ bytes_available = (long)attr_priv->buf.bytes - (long)off;
+
+ if (bytes_available > 0) {
+ ret_count = count < bytes_available ? count : bytes_available;
+ memcpy(buf, attr_priv->buf.buf + off, ret_count);
+ } else
+ ret_count = 0;
+
+out:
+ mutex_unlock(&drm_global_mutex);
+
+ return ret ?: ret_count;
+}
+
+int i915_gem_create_sysfs_file_entry(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_gem_file_attr_priv *attr_priv;
+ struct bin_attribute *obj_attr;
+ struct drm_file *file_local;
+ int ret;
+
+ /*
+ * Check for multiple drm files having the same tgid. If found, copy the
+ * bin attribute into the new file priv. Otherwise allocate a new bin
+ * attribute, and create its corresponding sysfs file.
+ */
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(file_local, &dev->filelist, lhead) {
+ struct drm_i915_file_private *file_priv_local =
+ file_local->driver_priv;
+
+ if (file_priv->tgid == file_priv_local->tgid) {
+ file_priv->obj_attr = file_priv_local->obj_attr;
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ obj_attr = kzalloc(sizeof(*obj_attr), GFP_KERNEL);
+ if (!obj_attr) {
+ DRM_ERROR("Alloc failed. Out of memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ attr_priv = kzalloc(sizeof(*attr_priv), GFP_KERNEL);
+ if (!attr_priv) {
+ DRM_ERROR("Alloc failed. Out of memory\n");
+ ret = -ENOMEM;
+ goto out_obj_attr;
+ }
+
+ snprintf(attr_priv->tgid_str, 16, "%d", task_tgid_nr(current));
+ obj_attr->attr.name = attr_priv->tgid_str;
+ obj_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ obj_attr->size = 0;
+ obj_attr->read = i915_gem_read_objects;
+
+ attr_priv->tgid = file_priv->tgid;
+ obj_attr->private = attr_priv;
+
+ ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj,
+ obj_attr);
+ if (ret) {
+ DRM_ERROR(
+ "sysfs tgid file setup failed. tgid=%d, process:%s, ret:%d\n",
+ pid_nr(file_priv->tgid), file_priv->process_name, ret);
+
+ goto out_attr_priv;
+ }
+
+ file_priv->obj_attr = obj_attr;
+ return 0;
+
+out_attr_priv:
+ kfree(attr_priv);
+out_obj_attr:
+ kfree(obj_attr);
+out:
+ return ret;
+}
+
+void i915_gem_remove_sysfs_file_entry(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_file *file_local;
+ int open_count = 1;
+
+ /*
+ * The current drm file instance has already been removed from the
+ * filelist at this point.
+ * Check whether the drm file being removed is the last one for its tgid,
+ * i.e. no other instances for this tgid exist in the filelist. If so,
+ * also remove the corresponding sysfs file entry.
+ */
+ list_for_each_entry(file_local, &dev->filelist, lhead) {
+ struct drm_i915_file_private *file_priv_local =
+ file_local->driver_priv;
+
+ if (pid_nr(file_priv->tgid) == pid_nr(file_priv_local->tgid))
+ open_count++;
+ }
+
+ if (open_count == 1) {
+ struct i915_gem_file_attr_priv *attr_priv;
+
+ if (WARN_ON(file_priv->obj_attr == NULL))
+ return;
+ attr_priv = file_priv->obj_attr->private;
+
+ sysfs_remove_bin_file(&dev_priv->memtrack_kobj,
+ file_priv->obj_attr);
+
+ i915_error_state_buf_release(&attr_priv->buf);
+ kfree(file_priv->obj_attr->private);
+ kfree(file_priv->obj_attr);
+ }
+}
+
+static struct bin_attribute i915_gem_client_state_attr = {
+ .attr.name = "i915_gem_meminfo",
+ .attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH,
+ .size = 0,
+ .read = i915_gem_clients_state_read,
+};
+
+static struct attribute *memtrack_kobj_attrs[] = {NULL};
+
+static struct kobj_type memtrack_kobj_type = {
+ .release = NULL,
+ .sysfs_ops = NULL,
+ .default_attrs = memtrack_kobj_attrs,
+};
+#endif
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
@@ -623,6 +905,28 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
DRM_ERROR("RPS sysfs setup failed\n");
i915_setup_error_capture(kdev);
+
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ /*
+ * Create the gfx_memtrack directory for memtrack sysfs files
+ */
+ ret = kobject_init_and_add(
+ &dev_priv->memtrack_kobj, &memtrack_kobj_type,
+ &kdev->kobj, "gfx_memtrack");
+ if (unlikely(ret != 0)) {
+ DRM_ERROR(
+ "i915 sysfs setup memtrack directory failed\n"
+ );
+ kobject_put(&dev_priv->memtrack_kobj);
+ } else {
+ ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj,
+ &i915_gem_client_state_attr);
+ if (ret)
+ DRM_ERROR(
+ "i915_gem_client_state sysfs setup failed\n"
+ );
+ }
+#endif
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
@@ -641,4 +945,11 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
+
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+ sysfs_remove_bin_file(&dev_priv->memtrack_kobj,
+ &i915_gem_client_state_attr);
+ kobject_del(&dev_priv->memtrack_kobj);
+ kobject_put(&dev_priv->memtrack_kobj);
+#endif
}
--
https://clearlinux.org