From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Large amounts of VRAM are usually not CPU accessible, so they are not mapped
into the process's address space. But since device drivers usually support
swapping buffers from VRAM to system memory, we can still run into an
out-of-memory situation when userspace starts to allocate too much.
This patch gives the OOM killer another hint about which process is
holding references to memory resources.
A GEM helper is provided and automatically used for all drivers using the
DEFINE_DRM_GEM_FOPS() and DEFINE_DRM_GEM_CMA_FOPS() macros.
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
---
drivers/gpu/drm/drm_file.c | 24 ++++++++++++++++++++++++
drivers/gpu/drm/drm_gem.c | 5 +++++
include/drm/drm_file.h | 9 +++++++++
include/drm/drm_gem.h | 1 +
include/drm/drm_gem_cma_helper.h | 1 +
5 files changed, 40 insertions(+)
@@ -1049,3 +1049,27 @@ unsigned long drm_get_unmapped_area(struct file *file,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
#endif /* CONFIG_MMU */
+
+
+/**
+ * drm_file_rss() - get number of pages held by a DRM file descriptor
+ * @f: the &struct file of an open DRM device node
+ *
+ * Return: number of pages allocated for this client, divided by the
+ * file reference count so that dup()ed descriptors get equal shares.
+ */
+long drm_file_rss(struct file *f)
+{
+	struct drm_file *file_priv = f->private_data;
+
+	if (!file_priv)
+		return 0;
+
+	/*
+	 * DRM file descriptors are often dup()ed; divide by the file
+	 * reference count so that each descriptor gets an equal share.
+	 */
+	return DIV_ROUND_UP(atomic_long_read(&file_priv->f_rss),
+			    file_count(f));
+}
+EXPORT_SYMBOL(drm_file_rss);
@@ -256,6 +256,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
+ atomic_long_sub(obj->size >> PAGE_SHIFT, &file_priv->f_rss);
drm_gem_object_handle_put_unlocked(obj);
return 0;
@@ -291,6 +292,8 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
+ atomic_long_sub(obj->size >> PAGE_SHIFT, &filp->f_rss);
+
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
@@ -399,6 +402,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
}
*handlep = handle;
+
+ atomic_long_add(obj->size >> PAGE_SHIFT, &file_priv->f_rss);
return 0;
err_revoke:
@@ -366,6 +366,13 @@ struct drm_file {
#if IS_ENABLED(CONFIG_DRM_LEGACY)
unsigned long lock_count; /* DRI1 legacy lock count */
#endif
+
+ /**
+ * @f_rss:
+ *
+ * How many pages are allocated through this driver connection.
+ */
+ atomic_long_t f_rss;
};
/**
@@ -430,4 +437,6 @@ unsigned long drm_get_unmapped_area(struct file *file,
#endif /* CONFIG_MMU */
+long drm_file_rss(struct file *f);
+
#endif /* _DRM_FILE_H_ */
@@ -338,6 +338,7 @@ struct drm_gem_object {
.read = drm_read,\
.llseek = noop_llseek,\
.mmap = drm_gem_mmap,\
+ .file_rss = drm_file_rss,\
}
void drm_gem_object_release(struct drm_gem_object *obj);
@@ -273,6 +273,7 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
.read = drm_read,\
.llseek = noop_llseek,\
.mmap = drm_gem_mmap,\
+ .file_rss = drm_file_rss,\
DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
}