// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"

#include "i915_driver.h"
#include "i915_drv.h"

#if IS_ENABLED(CONFIG_X86)
#include <asm/smp.h>
#else
#define wbinvd_on_all_cpus() \
	pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
#endif

void i915_gem_suspend(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int i;

	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
	/*
	 * On rare occasions, we've observed that fence completion triggers
	 * free_engines asynchronously via call_rcu(). Ensure those are done.
	 * This path is only called on suspend, so it's an acceptable cost.
	 */
	rcu_barrier();

	flush_workqueue(i915->wq);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the i915->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	for_each_gt(gt, i915, i)
		intel_gt_suspend_prepare(gt);

	i915_gem_drain_freed_objects(i915);
}
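/* Restore the backed-up contents of every local-memory (LMEM) region via TTM. */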
static int lmem_restore(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int ret = 0, id;

	for_each_memory_region(mr, i915, id) {
		if (mr->type == INTEL_MEMORY_LOCAL) {
			ret = i915_ttm_restore_region(mr, flags);
			if (ret)
				break;
		}
	}

	return ret;
}
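/* Back up the contents of every local-memory (LMEM) region via TTM. */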
static int lmem_suspend(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int ret = 0, id;

	for_each_memory_region(mr, i915, id) {
		if (mr->type == INTEL_MEMORY_LOCAL) {
			ret = i915_ttm_backup_region(mr, flags);
			if (ret)
				break;
		}
	}

	return ret;
}
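/* Clean up the backup state of local-memory regions after a failed backup attempt. */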
static void lmem_recover(struct drm_i915_private *i915)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == INTEL_MEMORY_LOCAL)
			i915_ttm_recover_region(mr);
}
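/*
 * Suspend variant that also backs up local-memory contents: first back up
 * what we can with the GPU's help, then copy out whatever remains pinned
 * once the migrate context is no longer in use.
 */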
int i915_gem_backup_suspend(struct drm_i915_private *i915)
{
	int ret;

	/* Opportunistically try to evict unpinned objects */
	ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU);
	if (ret)
		goto out_recover;

	i915_gem_suspend(i915);

	/*
	 * More objects may have become unpinned as requests were
	 * retired. Now try to evict again. The gt may be wedged here
	 * in which case we automatically fall back to memcpy.
	 * We also allow backing up pinned objects that have not been
	 * marked for early recovery, and that may contain, for example,
	 * page-tables for the migrate context.
	 */
	ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU |
			   I915_TTM_BACKUP_PINNED);
	if (ret)
		goto out_recover;

	/*
	 * Remaining objects are backed up using memcpy once we've stopped
	 * using the migrate context.
	 */
	ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED);
	if (ret)
		goto out_recover;

	return 0;

out_recover:
	lmem_recover(i915);

	return ret;
}
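/*
 * Final step of suspend: quiesce each GT, then move shrinkable objects into
 * the CPU write domain, flushing CPU caches where their contents are not
 * already coherent, so a hibernation image sees up-to-date data.
 */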
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	struct intel_gt *gt;
	unsigned long flags;
	unsigned int i;
	bool flush = false;

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	/* Like i915_gem_suspend(), flush tasks staged from fence triggers */
	rcu_barrier();

	for_each_gt(gt, i915, i)
		intel_gt_suspend_late(gt);

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		list_for_each_entry(obj, *phase, mm.link) {
			if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
				flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
			__start_cpu_write(obj); /* presume auto-hibernate */
		}
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	if (flush)
		wbinvd_on_all_cpus();
}
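/* First hibernation (freeze) step: shrink everything we can to keep the image small. */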
int i915_gem_freeze(struct drm_i915_private *i915)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(i915);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze().
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		i915_gem_shrink(NULL, i915, -1UL, NULL, ~0);
	i915_gem_drain_freed_objects(i915);

	wbinvd_on_all_cpus();
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
		__start_cpu_write(obj);

	return 0;
}
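/*
 * Resume: restore any backed-up local-memory contents, bring each GT back up,
 * and wedge any GT that fails to do so.
 */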
void i915_gem_resume(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	int ret, i, j;

	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	ret = lmem_restore(i915, 0);
	GEM_WARN_ON(ret);

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	for_each_gt(gt, i915, i)
		if (intel_gt_resume(gt))
			goto err_wedged;

	ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
	GEM_WARN_ON(ret);

	return;

err_wedged:
	for_each_gt(gt, i915, j) {
		if (!intel_gt_is_wedged(gt)) {
			dev_err(i915->drm.dev,
				"Failed to re-initialize GPU[%u], declaring it wedged!\n",
				j);
			intel_gt_set_wedged(gt);
		}

		if (j == i)
			break;
	}
}