drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list
Currently we create a new mmap_offset for every call to
mmap_offset_ioctl. This exposes us to an abusive client that may simply
create new mmap_offsets ad infinitum, exhausting physical memory and
the virtual address space. In addition to the exhaustion, a very long
linear list of mmap_offsets causes other clients using the object to
incur long list walks; these long lists can also be generated simply by
having many clients create their own mmap_offset.

However, we can instead use the drm_vma_node itself to manage the file
association (allow/revoke), dropping our need to keep an mmo per-file.
Then, if we keep a small rbtree of per-type mmap_offsets, we can look up
duplicate requests quickly.
Fixes: cc662126b4 ("drm/i915: Introduce DRM_I915_GEM_MMAP_OFFSET")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Reviewed-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200120104924.4000706-3-chris@chris-wilson.co.uk
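
The idea described above can be illustrated with a small, self-contained
sketch: at most one node per mmap type lives in a per-object rbtree keyed by
the enum value, so a repeated mmap_offset request finds the existing node
instead of growing a list. The names below (example_offset, example_lookup,
example_insert, EX_MMAP_*) are illustrative only and are not part of the
patch; the real helpers added by this commit are lookup_mmo()/insert_mmo()
in the diff that follows.

/*
 * Sketch only: a per-type rbtree keyed by an enum, mirroring what the patch
 * does for i915_mmap_offset. Assumes the caller serialises lookups/inserts
 * (the patch uses obj->mmo.lock) and checks for an existing node before
 * inserting.
 */
#include <linux/rbtree.h>

enum example_mmap_type { EX_MMAP_GTT, EX_MMAP_WC, EX_MMAP_WB, EX_MMAP_UC };

struct example_offset {
        struct rb_node node;
        enum example_mmap_type type;
};

static struct example_offset *
example_lookup(struct rb_root *root, enum example_mmap_type type)
{
        struct rb_node *rb = root->rb_node;

        while (rb) {
                struct example_offset *eo = rb_entry(rb, typeof(*eo), node);

                if (eo->type == type)
                        return eo;

                rb = eo->type < type ? rb->rb_right : rb->rb_left;
        }

        return NULL;
}

static void
example_insert(struct rb_root *root, struct example_offset *eo)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        /* Walk down to the leaf position for this type, remembering the parent. */
        while (*p) {
                struct example_offset *pos = rb_entry(*p, typeof(*pos), node);

                parent = *p;
                p = pos->type < eo->type ? &(*p)->rb_right : &(*p)->rb_left;
        }

        rb_link_node(&eo->node, parent, p);
        rb_insert_color(&eo->node, root);
}

Because there are only a handful of mmap types, the tree stays tiny, while
the per-file allow/revoke bookkeeping is delegated to drm_vma_node_allow()
and drm_vma_node_revoke() on the embedded drm_vma_offset_node, so the object
no longer needs to remember which drm_file created each offset.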
parent 814df0ce5e
commit 7865559872

3 changed files with 85 additions and 29 deletions
drivers/gpu/drm/i915/gem/i915_gem_mman.c

@@ -455,10 +455,11 @@ out:
 
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
-        struct i915_mmap_offset *mmo;
+        struct i915_mmap_offset *mmo, *mn;
 
         spin_lock(&obj->mmo.lock);
-        list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+        rbtree_postorder_for_each_entry_safe(mmo, mn,
+                                             &obj->mmo.offsets, offset) {
                 /*
                  * vma_node_unmap for GTT mmaps handled already in
                  * __i915_gem_object_release_mmap_gtt
@@ -487,6 +488,67 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
         i915_gem_object_release_mmap_offset(obj);
 }
 
+static struct i915_mmap_offset *
+lookup_mmo(struct drm_i915_gem_object *obj,
+           enum i915_mmap_type mmap_type)
+{
+        struct rb_node *rb;
+
+        spin_lock(&obj->mmo.lock);
+        rb = obj->mmo.offsets.rb_node;
+        while (rb) {
+                struct i915_mmap_offset *mmo =
+                        rb_entry(rb, typeof(*mmo), offset);
+
+                if (mmo->mmap_type == mmap_type) {
+                        spin_unlock(&obj->mmo.lock);
+                        return mmo;
+                }
+
+                if (mmo->mmap_type < mmap_type)
+                        rb = rb->rb_right;
+                else
+                        rb = rb->rb_left;
+        }
+        spin_unlock(&obj->mmo.lock);
+
+        return NULL;
+}
+
+static struct i915_mmap_offset *
+insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
+{
+        struct rb_node *rb, **p;
+
+        spin_lock(&obj->mmo.lock);
+        rb = NULL;
+        p = &obj->mmo.offsets.rb_node;
+        while (*p) {
+                struct i915_mmap_offset *pos;
+
+                rb = *p;
+                pos = rb_entry(rb, typeof(*pos), offset);
+
+                if (pos->mmap_type == mmo->mmap_type) {
+                        spin_unlock(&obj->mmo.lock);
+                        drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+                                              &mmo->vma_node);
+                        kfree(mmo);
+                        return pos;
+                }
+
+                if (pos->mmap_type < mmo->mmap_type)
+                        p = &rb->rb_right;
+                else
+                        p = &rb->rb_left;
+        }
+        rb_link_node(&mmo->offset, rb, p);
+        rb_insert_color(&mmo->offset, &obj->mmo.offsets);
+        spin_unlock(&obj->mmo.lock);
+
+        return mmo;
+}
+
 static struct i915_mmap_offset *
 mmap_offset_attach(struct drm_i915_gem_object *obj,
                    enum i915_mmap_type mmap_type,
@@ -496,20 +558,22 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
         struct i915_mmap_offset *mmo;
         int err;
 
+        mmo = lookup_mmo(obj, mmap_type);
+        if (mmo)
+                goto out;
+
         mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
         if (!mmo)
                 return ERR_PTR(-ENOMEM);
 
         mmo->obj = obj;
-        mmo->dev = obj->base.dev;
-        mmo->file = file;
         mmo->mmap_type = mmap_type;
         drm_vma_node_reset(&mmo->vma_node);
 
-        err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
-                                 obj->base.size / PAGE_SIZE);
+        err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+                                 &mmo->vma_node, obj->base.size / PAGE_SIZE);
         if (likely(!err))
-                goto out;
+                goto insert;
 
         /* Attempt to reap some mmap space from dead objects */
         err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
@@ -517,19 +581,17 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
                 goto err;
 
         i915_gem_drain_freed_objects(i915);
-        err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
-                                 obj->base.size / PAGE_SIZE);
+        err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+                                 &mmo->vma_node, obj->base.size / PAGE_SIZE);
         if (err)
                 goto err;
 
+insert:
+        mmo = insert_mmo(obj, mmo);
+        GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
 out:
         if (file)
                 drm_vma_node_allow(&mmo->vma_node, file);
-
-        spin_lock(&obj->mmo.lock);
-        list_add(&mmo->offset, &obj->mmo.offsets);
-        spin_unlock(&obj->mmo.lock);
-
         return mmo;
 
 err:
drivers/gpu/drm/i915/gem/i915_gem_object.c

@@ -63,7 +63,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
         INIT_LIST_HEAD(&obj->lut_list);
 
         spin_lock_init(&obj->mmo.lock);
-        INIT_LIST_HEAD(&obj->mmo.offsets);
+        obj->mmo.offsets = RB_ROOT;
 
         init_rcu_head(&obj->rcu);
 
@@ -100,8 +100,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
         struct drm_i915_gem_object *obj = to_intel_bo(gem);
         struct drm_i915_file_private *fpriv = file->driver_priv;
+        struct i915_mmap_offset *mmo, *mn;
         struct i915_lut_handle *lut, *ln;
-        struct i915_mmap_offset *mmo;
         LIST_HEAD(close);
 
         i915_gem_object_lock(obj);
@@ -117,14 +117,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
         i915_gem_object_unlock(obj);
 
         spin_lock(&obj->mmo.lock);
-        list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
-                if (mmo->file != file)
-                        continue;
-
-                spin_unlock(&obj->mmo.lock);
+        rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
                 drm_vma_node_revoke(&mmo->vma_node, file);
-                spin_lock(&obj->mmo.lock);
-        }
         spin_unlock(&obj->mmo.lock);
 
         list_for_each_entry_safe(lut, ln, &close, obj_link) {
@@ -203,12 +197,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
                 i915_gem_object_release_mmap(obj);
 
-                list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+                rbtree_postorder_for_each_entry_safe(mmo, mn,
+                                                     &obj->mmo.offsets,
+                                                     offset) {
                         drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
                                               &mmo->vma_node);
                         kfree(mmo);
                 }
-                INIT_LIST_HEAD(&obj->mmo.offsets);
+                obj->mmo.offsets = RB_ROOT;
 
                 GEM_BUG_ON(atomic_read(&obj->bind_count));
                 GEM_BUG_ON(obj->userfault_count);
drivers/gpu/drm/i915/gem/i915_gem_object_types.h

@@ -71,13 +71,11 @@ enum i915_mmap_type {
 };
 
 struct i915_mmap_offset {
-        struct drm_device *dev;
         struct drm_vma_offset_node vma_node;
         struct drm_i915_gem_object *obj;
-        struct drm_file *file;
         enum i915_mmap_type mmap_type;
 
-        struct list_head offset;
+        struct rb_node offset;
 };
 
 struct drm_i915_gem_object {
@@ -137,7 +135,7 @@ struct drm_i915_gem_object {
 
         struct {
                 spinlock_t lock; /* Protects access to mmo offsets */
-                struct list_head offsets;
+                struct rb_root offsets;
         } mmo;
 
         I915_SELFTEST_DECLARE(struct list_head st_link);