/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include "i915_gem_context_types.h"
|
2016-12-31 11:20:11 +00:00
|
|
|
|
2019-04-24 18:48:39 +01:00
|
|
|
#include "gt/intel_context.h"
|
|
|
|
|
2019-10-04 14:40:09 +01:00
|
|
|
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
|
2018-02-13 14:18:33 +00:00
|
|
|
|
2016-12-31 11:20:11 +00:00
|
|
|
struct drm_device;
struct drm_file;

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

/*
 * Context persistence:
 *
 * By default, a context and its GPU requests are allowed to persist past
 * the point of closure until the requests are complete. This lets clients
 * operate in a fire-and-forget manner: they can set up a rendering
 * pipeline, hand it over to a display server and exit immediately, with
 * the pipeline kept alive until completion so that the consumer can use
 * the results in the future and present them to the user.
 *
 * The compute model is a little different. There is little to no buffer
 * sharing between processes; kernels tend to operate on a continuous
 * stream for an indeterminate length of time, feeding results back to the
 * client, i.e. acting like a DSP. Such clients prefer that their contexts
 * are cleaned up immediately upon closure, so that when they run without
 * hangchecking (e.g. for compute kernels of indeterminate runtime), any
 * GPU hang or other unexpected workload is terminated with the process
 * and does not continue to hog resources.
 *
 * New contexts default to the legacy persistence mode, as some desktop
 * applications depend upon it; clients must opt in to immediate cleanup
 * on context closure. If the hangchecking modparam is disabled, so is
 * persistent context support -- all contexts are terminated on closure.
 */
static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}
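
/*
 * A minimal userspace sketch (not part of this header) of opting a
 * context out of persistence via the context-param uAPI from
 * <drm/i915_drm.h>; fd and ctx_id are assumed to come from device open
 * and context creation:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PERSISTENCE,
 *		.value = 0,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * With value 0, outstanding requests are cancelled when the context is
 * closed; with value 1 (the default), they run to completion.
 */
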
/*
 * User-defined engine maps:
 *
 * To support an increasing number of engines, which may be sparse and
 * even heterogeneous within a class (not all video decoders are created
 * equal), a specific engine is identified by a (class, instance) tuple,
 * and the user may construct a map of engines to capabilities. Virtual
 * engines, where one user engine maps behind the scenes to any number of
 * physical engines, fit the same picture, with the user in full control
 * of the mapping. A context may therefore be constrained to the set of
 * engines it can access, fully ordered by the user via (class, instance).
 * With such precise control at context setup, the existing execbuf uABI
 * of specifying a single index continues to work; only now it no longer
 * maps automagically onto the hardware engines but into the user-defined
 * engine map of the context. Note that execbuf is currently limited to
 * using only the first 64 engines.
 */
static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

void i915_gem_context_release(struct kref *ctx_ref);

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}

static inline struct i915_address_space *
i915_gem_context_vm(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}

static inline struct i915_address_space *
i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (!vm)
		vm = &ctx->i915->ggtt.vm;
	vm = i915_vm_get(vm);
	rcu_read_unlock();

	return vm;
}
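
/*
 * A caller sketch: the reference returned above must be balanced with
 * i915_vm_put() once the address space is no longer needed:
 *
 *	struct i915_address_space *vm;
 *
 *	vm = i915_gem_context_get_vm_rcu(ctx);
 *	... use vm ...
 *	i915_vm_put(vm);
 */
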
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines,
					 lockdep_is_held(&ctx->engines_mutex));
}

static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
	__acquires(&ctx->engines_mutex)
{
	mutex_lock(&ctx->engines_mutex);
	return i915_gem_context_engines(ctx);
}

static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
	__releases(&ctx->engines_mutex)
{
	mutex_unlock(&ctx->engines_mutex);
}

static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
	struct intel_context *ce;

	rcu_read_lock(); {
		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
		if (unlikely(!e)) /* context was closed! */
			ce = ERR_PTR(-ENOENT);
		else if (likely(idx < e->num_engines && e->engines[idx]))
			ce = intel_context_get(e->engines[idx]);
		else
			ce = ERR_PTR(-EINVAL);
	} rcu_read_unlock();

	return ce;
}
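
/*
 * A caller sketch: a successful lookup returns a reference that must be
 * balanced with intel_context_put(); failures are returned as ERR_PTR:
 *
 *	struct intel_context *ce;
 *
 *	ce = i915_gem_context_get_engine(ctx, idx);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *	... use ce ...
 *	intel_context_put(ce);
 */
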
static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
			   struct i915_gem_engines *engines)
{
	it->engines = engines;
	it->idx = 0;
}

struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);

#define for_each_gem_engine(ce, engines, it) \
	for (i915_gem_engines_iter_init(&(it), (engines)); \
	     ((ce) = i915_gem_engines_iter_next(&(it)));)
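
/*
 * Typical iteration sketch, holding the engines mutex across the walk via
 * i915_gem_context_lock_engines():
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		... operate on each engine context ce ...
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */
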
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);

int i915_gem_user_to_context_sseu(struct intel_gt *gt,
				  const struct drm_i915_gem_context_param_sseu *user,
				  struct intel_sseu *context);

#endif /* !__I915_GEM_CONTEXT_H__ */