/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
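	/*
	 * Worked example (sketch): with supported = 4K | 64K | 2M and
	 * phys = 64K (i.e. every chunk in the sg list is exactly 64K), the
	 * loop below sets sg = 64K | 4K, since phys has a bit at or above
	 * each of those sizes but nothing at or above 2M.
	 */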
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
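
/*
 * Typical usage (illustrative sketch only): callers bracket access to the
 * backing store with a pin/unpin pair, so the pages stay resident for the
 * duration of the access:
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... use the object's pages ...
 *	i915_gem_object_unpin_pages(obj);
 */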
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough; /* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTEs (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);
	return vaddr;
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	if (type != I915_MAP_WC)
		return NULL;

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return NULL;
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);
	return vaddr;
}

/* get, pin, and map the pages of the object into kernel space */
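/*
 * Usage sketch (illustrative only): a short-lived CPU mapping is typically
 * bracketed as
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... read/write through vaddr ...
 *	i915_gem_object_unpin_map(obj);
 *
 * with an optional __i915_gem_object_flush_map() before unpinning if the
 * writes must be visible to a non-coherent reader. Unpinning only drops
 * the pin taken here; the mapping itself is cached in obj->mm.mapping.
 */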
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = NULL;
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
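	/*
	 * Layout sketch: an sg entry covering pages [idx, idx + count) is
	 * stored as the scatterlist pointer at index idx, while indices
	 * idx + 1 .. idx + count - 1 hold xa_mk_value(idx), so a lookup
	 * landing in the middle of the entry can be redirected back to
	 * its first page (see the lookup path below).
	 */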
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}