Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-18 12:30:31 +00:00)
drm/i915: Lift timeline into intel_context
Move the timeline from being inside the intel_ring to intel_context itself. This saves much pointer dancing and makes the relations of the context to its timeline much clearer.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190809182518.20486-4-chris@chris-wilson.co.uk
parent 48ae397b6b
commit 75d0a7f31e
15 changed files with 141 additions and 136 deletions
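The heart of the change is visible in the two type-header hunks below: the timeline pointer moves from struct intel_ring into struct intel_context. For orientation, a minimal sketch of the before and after (simplified struct layouts, not the full definitions from the patch):

	/* Before (simplified): the timeline hangs off the ring, so every
	 * user must dereference ce->ring->timeline. */
	struct intel_ring {
		struct intel_timeline *timeline;
		/* ... */
	};

	/* After (simplified): the context owns a timeline reference
	 * directly, reachable as ce->timeline. */
	struct intel_context {
		struct intel_ring *ring;
		struct intel_timeline *timeline;
		/* ... */
	};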
drivers/gpu/drm/i915/gem/i915_gem_context.c

@@ -489,6 +489,29 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
 	i915_vm_put(vm);
 }
 
+static void __set_timeline(struct intel_timeline **dst,
+			   struct intel_timeline *src)
+{
+	struct intel_timeline *old = *dst;
+
+	*dst = src ? intel_timeline_get(src) : NULL;
+
+	if (old)
+		intel_timeline_put(old);
+}
+
+static void __apply_timeline(struct intel_context *ce, void *timeline)
+{
+	__set_timeline(&ce->timeline, timeline);
+}
+
+static void __assign_timeline(struct i915_gem_context *ctx,
+			      struct intel_timeline *timeline)
+{
+	__set_timeline(&ctx->timeline, timeline);
+	context_apply_all(ctx, __apply_timeline, timeline);
+}
+
 static struct i915_gem_context *
 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 {
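The new __set_timeline() helper takes the new reference before dropping the old one, which keeps the swap safe even when the incoming timeline is the one already installed. A standalone userspace demo of the same pattern (simplified types and hypothetical names, not kernel code):

	#include <stdio.h>

	struct obj {
		int refcount;
	};

	static struct obj *obj_get(struct obj *o)
	{
		o->refcount++;
		return o;
	}

	static void obj_put(struct obj *o)
	{
		if (--o->refcount == 0)
			printf("%p released\n", (void *)o);
	}

	/* Same shape as __set_timeline(): grab the new reference before
	 * dropping the old one, so src == *slot cannot underflow. */
	static void set_slot(struct obj **slot, struct obj *src)
	{
		struct obj *old = *slot;

		*slot = src ? obj_get(src) : NULL;
		if (old)
			obj_put(old);
	}

	int main(void)
	{
		struct obj a = { .refcount = 1 };
		struct obj *slot = NULL;

		set_slot(&slot, &a);	/* refcount is now 2 */
		set_slot(&slot, &a);	/* replace with itself: still 2 */
		set_slot(&slot, NULL);	/* back to 1 */
		obj_put(&a);		/* 0: released */
		return 0;
	}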
@@ -531,7 +554,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 			return ERR_CAST(timeline);
 		}
 
-		ctx->timeline = timeline;
+		__assign_timeline(ctx, timeline);
+		intel_timeline_put(timeline);
 	}
 
 	trace_i915_context_create(ctx);
@@ -1931,13 +1955,8 @@ unlock:
 static int clone_timeline(struct i915_gem_context *dst,
 			  struct i915_gem_context *src)
 {
-	if (src->timeline) {
-		GEM_BUG_ON(src->timeline == dst->timeline);
-
-		if (dst->timeline)
-			intel_timeline_put(dst->timeline);
-		dst->timeline = intel_timeline_get(src->timeline);
-	}
+	if (src->timeline)
+		__assign_timeline(dst, src->timeline);
 
 	return 0;
 }
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c

@@ -2182,7 +2182,7 @@ err_unpin:
 static void eb_unpin_context(struct i915_execbuffer *eb)
 {
 	struct intel_context *ce = eb->context;
-	struct intel_timeline *tl = ce->ring->timeline;
+	struct intel_timeline *tl = ce->timeline;
 
 	mutex_lock(&tl->mutex);
 	intel_context_exit(ce);
drivers/gpu/drm/i915/gt/intel_context.c

@@ -68,7 +68,7 @@ int __intel_context_do_pin(struct intel_context *ce)
 		goto err;
 
 	GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
-		  ce->engine->name, ce->ring->timeline->fence_context,
+		  ce->engine->name, ce->timeline->fence_context,
 		  ce->ring->head, ce->ring->tail);
 
 	i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
@@ -98,7 +98,7 @@ void intel_context_unpin(struct intel_context *ce)
 
 	if (likely(atomic_dec_and_test(&ce->pin_count))) {
 		GEM_TRACE("%s context:%llx retire\n",
-			  ce->engine->name, ce->ring->timeline->fence_context);
+			  ce->engine->name, ce->timeline->fence_context);
 
 		ce->ops->unpin(ce);
 
@@ -143,11 +143,12 @@ static void __intel_context_retire(struct i915_active *active)
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
 
 	GEM_TRACE("%s context:%llx retire\n",
-		  ce->engine->name, ce->ring->timeline->fence_context);
+		  ce->engine->name, ce->timeline->fence_context);
 
 	if (ce->state)
 		__context_unpin_state(ce->state);
 
+	intel_timeline_unpin(ce->timeline);
 	intel_ring_unpin(ce->ring);
 	intel_context_put(ce);
 }
@@ -163,15 +164,21 @@ static int __intel_context_active(struct i915_active *active)
 	if (err)
 		goto err_put;
 
+	err = intel_timeline_pin(ce->timeline);
+	if (err)
+		goto err_ring;
+
 	if (!ce->state)
 		return 0;
 
 	err = __context_pin_state(ce->state);
 	if (err)
-		goto err_ring;
+		goto err_timeline;
 
 	return 0;
 
+err_timeline:
+	intel_timeline_unpin(ce->timeline);
 err_ring:
 	intel_ring_unpin(ce->ring);
 err_put:
@@ -218,6 +225,8 @@ intel_context_init(struct intel_context *ce,
 
 	ce->gem_context = ctx;
 	ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+	if (ctx->timeline)
+		ce->timeline = intel_timeline_get(ctx->timeline);
 
 	ce->engine = engine;
 	ce->ops = engine->cops;
@@ -235,6 +244,8 @@ intel_context_init(struct intel_context *ce,
 
 void intel_context_fini(struct intel_context *ce)
 {
+	if (ce->timeline)
+		intel_timeline_put(ce->timeline);
 	i915_vm_put(ce->vm);
 
 	mutex_destroy(&ce->pin_mutex);
@@ -279,7 +290,7 @@ void intel_context_exit_engine(struct intel_context *ce)
 int intel_context_prepare_remote_request(struct intel_context *ce,
 					 struct i915_request *rq)
 {
-	struct intel_timeline *tl = ce->ring->timeline;
+	struct intel_timeline *tl = ce->timeline;
 	int err;
 
 	/* Only suitable for use in remotely modifying this context */
drivers/gpu/drm/i915/gt/intel_context.h

@@ -120,15 +120,15 @@ static inline void intel_context_put(struct intel_context *ce)
 
 static inline int __must_check
 intel_context_timeline_lock(struct intel_context *ce)
-	__acquires(&ce->ring->timeline->mutex)
+	__acquires(&ce->timeline->mutex)
 {
-	return mutex_lock_interruptible(&ce->ring->timeline->mutex);
+	return mutex_lock_interruptible(&ce->timeline->mutex);
 }
 
 static inline void intel_context_timeline_unlock(struct intel_context *ce)
-	__releases(&ce->ring->timeline->mutex)
+	__releases(&ce->timeline->mutex)
 {
-	mutex_unlock(&ce->ring->timeline->mutex);
+	mutex_unlock(&ce->timeline->mutex);
 }
 
 int intel_context_prepare_remote_request(struct intel_context *ce,
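The lock helpers above keep their sparse annotations (__acquires/__releases) in step with the mutex's new home on the context. A hypothetical caller, for illustration only (the function name is invented, the two helpers are real):

	static int touch_timeline(struct intel_context *ce)
	{
		int err;

		/* ce->timeline->mutex is taken interruptibly */
		err = intel_context_timeline_lock(ce);
		if (err)
			return err;

		/* ... operate on ce->timeline while holding its mutex ... */

		intel_context_timeline_unlock(ce);
		return 0;
	}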
drivers/gpu/drm/i915/gt/intel_context_types.h

@@ -53,6 +53,7 @@ struct intel_context {
 
 	struct i915_vma *state;
 	struct intel_ring *ring;
+	struct intel_timeline *timeline;
 
 	unsigned long flags;
 #define CONTEXT_ALLOC_BIT 0
drivers/gpu/drm/i915/gt/intel_engine.h

@@ -196,9 +196,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 #define CNL_HWS_CSB_WRITE_INDEX 0x2f
 
 struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct intel_timeline *timeline,
-			 int size);
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
 int intel_ring_pin(struct intel_ring *ring);
 void intel_ring_reset(struct intel_ring *ring, u32 tail);
 unsigned int intel_ring_update_space(struct intel_ring *ring);
drivers/gpu/drm/i915/gt/intel_engine_cs.c

@@ -680,7 +680,6 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 		goto out_frame;
 
 	INIT_LIST_HEAD(&frame->ring.request_list);
-	frame->ring.timeline = &frame->timeline;
 	frame->ring.vaddr = frame->cs;
 	frame->ring.size = sizeof(frame->cs);
 	frame->ring.effective_size = frame->ring.size;
drivers/gpu/drm/i915/gt/intel_engine_types.h

@@ -69,7 +69,6 @@ struct intel_ring {
 	struct i915_vma *vma;
 	void *vaddr;
 
-	struct intel_timeline *timeline;
 	struct list_head request_list;
 	struct list_head active_link;
 
@@ -286,8 +285,6 @@ struct intel_engine_cs {
 
 	struct intel_sseu sseu;
 
-	struct intel_ring *buffer;
-
 	struct {
 		spinlock_t lock;
 		struct list_head requests;
@@ -306,6 +303,11 @@ struct intel_engine_cs {
 	struct drm_i915_gem_object *default_state;
 	void *pinned_default_state;
 
+	struct {
+		struct intel_ring *ring;
+		struct intel_timeline *timeline;
+	} legacy;
+
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
 	 * heavyweight seqno dance, we delegate the task (of being the
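Taken together, the two intel_engine_cs hunks above swap the old engine->buffer pointer for an engine->legacy sub-struct: the ring and the pinned timeline that the legacy ring-buffer submission path shares across all of its contexts now travel together, while execlists engines simply never populate the pair (see the intel_lrc.c and intel_ringbuffer.c hunks below).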
drivers/gpu/drm/i915/gt/intel_lrc.c

@@ -2821,9 +2821,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 
 int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 {
-	/* Intentionally left blank. */
-	engine->buffer = NULL;
-
 	tasklet_init(&engine->execlists.tasklet,
 		     execlists_submission_tasklet, (unsigned long)engine);
 	timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
@@ -3071,23 +3068,13 @@ err_unpin_ctx:
 	return ret;
 }
 
-static struct intel_timeline *
-get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt)
-{
-	if (ctx->timeline)
-		return intel_timeline_get(ctx->timeline);
-	else
-		return intel_timeline_create(gt, NULL);
-}
-
 static int __execlists_context_alloc(struct intel_context *ce,
 				     struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
+	struct intel_ring *ring;
 	struct i915_vma *vma;
 	u32 context_size;
-	struct intel_ring *ring;
-	struct intel_timeline *timeline;
 	int ret;
 
 	GEM_BUG_ON(ce->state);
@@ -3109,15 +3096,19 @@ static int __execlists_context_alloc(struct intel_context *ce,
 		goto error_deref_obj;
 	}
 
-	timeline = get_timeline(ce->gem_context, engine->gt);
-	if (IS_ERR(timeline)) {
-		ret = PTR_ERR(timeline);
-		goto error_deref_obj;
+	if (!ce->timeline) {
+		struct intel_timeline *tl;
+
+		tl = intel_timeline_create(engine->gt, NULL);
+		if (IS_ERR(tl)) {
+			ret = PTR_ERR(tl);
+			goto error_deref_obj;
+		}
+
+		ce->timeline = tl;
 	}
 
-	ring = intel_engine_create_ring(engine, timeline,
-					(unsigned long)ce->ring);
-	intel_timeline_put(timeline);
+	ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
drivers/gpu/drm/i915/gt/intel_ringbuffer.c

@@ -636,7 +636,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 static int xcs_resume(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ring *ring = engine->buffer;
+	struct intel_ring *ring = engine->legacy.ring;
 	int ret = 0;
 
 	GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
@@ -832,12 +832,12 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
 		 */
 		__i915_request_reset(rq, stalled);
 
-		GEM_BUG_ON(rq->ring != engine->buffer);
+		GEM_BUG_ON(rq->ring != engine->legacy.ring);
 		head = rq->head;
 	} else {
-		head = engine->buffer->tail;
+		head = engine->legacy.ring->tail;
 	}
-	engine->buffer->head = intel_ring_wrap(engine->buffer, head);
+	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -1192,10 +1192,6 @@ int intel_ring_pin(struct intel_ring *ring)
 	if (atomic_fetch_inc(&ring->pin_count))
 		return 0;
 
-	ret = intel_timeline_pin(ring->timeline);
-	if (ret)
-		goto err_unpin;
-
 	flags = PIN_GLOBAL;
 
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -1208,7 +1204,7 @@ int intel_ring_pin(struct intel_ring *ring)
 
 	ret = i915_vma_pin(vma, 0, 0, flags);
 	if (unlikely(ret))
-		goto err_timeline;
+		goto err_unpin;
 
 	if (i915_vma_is_map_and_fenceable(vma))
 		addr = (void __force *)i915_vma_pin_iomap(vma);
@@ -1225,13 +1221,10 @@ int intel_ring_pin(struct intel_ring *ring)
 	GEM_BUG_ON(ring->vaddr);
 	ring->vaddr = addr;
 
-	GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context);
 	return 0;
 
 err_ring:
 	i915_vma_unpin(vma);
-err_timeline:
-	intel_timeline_unpin(ring->timeline);
 err_unpin:
 	atomic_dec(&ring->pin_count);
 	return ret;
@@ -1254,8 +1247,6 @@ void intel_ring_unpin(struct intel_ring *ring)
 	if (!atomic_dec_and_test(&ring->pin_count))
 		return;
 
-	GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
-
 	/* Discard any unused bytes beyond that submitted to hw. */
 	intel_ring_reset(ring, ring->tail);
 
@@ -1270,8 +1261,6 @@ void intel_ring_unpin(struct intel_ring *ring)
 
 	i915_vma_unpin(vma);
 	i915_vma_make_purgeable(vma);
-
-	intel_timeline_unpin(ring->timeline);
 }
 
 static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
@@ -1306,9 +1295,7 @@ err:
 }
 
 struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct intel_timeline *timeline,
-			 int size)
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 {
 	struct drm_i915_private *i915 = engine->i915;
 	struct intel_ring *ring;
@@ -1323,7 +1310,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 
 	kref_init(&ring->ref);
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->timeline = intel_timeline_get(timeline);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -1353,7 +1339,6 @@ void intel_ring_free(struct kref *ref)
 	i915_vma_close(ring->vma);
 	i915_vma_put(ring->vma);
 
-	intel_timeline_put(ring->timeline);
 	kfree(ring);
 }
 
@@ -1485,8 +1470,9 @@ static int ring_context_alloc(struct intel_context *ce)
 	struct intel_engine_cs *engine = ce->engine;
 
 	/* One ringbuffer to rule them all */
-	GEM_BUG_ON(!engine->buffer);
-	ce->ring = engine->buffer;
+	GEM_BUG_ON(!engine->legacy.ring);
+	ce->ring = engine->legacy.ring;
+	ce->timeline = intel_timeline_get(engine->legacy.timeline);
 
 	GEM_BUG_ON(ce->state);
 	if (engine->context_size) {
@@ -2165,8 +2151,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
 
 	intel_engine_cleanup_common(engine);
 
-	intel_ring_unpin(engine->buffer);
-	intel_ring_put(engine->buffer);
+	intel_ring_unpin(engine->legacy.ring);
+	intel_ring_put(engine->legacy.ring);
+
+	intel_timeline_unpin(engine->legacy.timeline);
+	intel_timeline_put(engine->legacy.timeline);
 
 	kfree(engine);
 }
@@ -2350,32 +2339,40 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
 	}
 	GEM_BUG_ON(timeline->has_initial_breadcrumb);
 
-	ring = intel_engine_create_ring(engine, timeline, SZ_16K);
-	intel_timeline_put(timeline);
+	err = intel_timeline_pin(timeline);
+	if (err)
+		goto err_timeline;
+
+	ring = intel_engine_create_ring(engine, SZ_16K);
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
-		goto err;
+		goto err_timeline_unpin;
 	}
 
 	err = intel_ring_pin(ring);
 	if (err)
 		goto err_ring;
 
-	GEM_BUG_ON(engine->buffer);
-	engine->buffer = ring;
+	GEM_BUG_ON(engine->legacy.ring);
+	engine->legacy.ring = ring;
+	engine->legacy.timeline = timeline;
 
 	err = intel_engine_init_common(engine);
 	if (err)
-		goto err_unpin;
+		goto err_ring_unpin;
 
-	GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
 
 	return 0;
 
-err_unpin:
+err_ring_unpin:
 	intel_ring_unpin(ring);
 err_ring:
 	intel_ring_put(ring);
+err_timeline_unpin:
+	intel_timeline_unpin(timeline);
+err_timeline:
+	intel_timeline_put(timeline);
 err:
 	intel_engine_cleanup_common(engine);
 	return err;
drivers/gpu/drm/i915/gt/mock_engine.c

@@ -32,11 +32,6 @@
 #include "mock_engine.h"
 #include "selftests/mock_request.h"
 
-struct mock_ring {
-	struct intel_ring base;
-	struct intel_timeline timeline;
-};
-
 static void mock_timeline_pin(struct intel_timeline *tl)
 {
 	tl->pin_count++;
@@ -51,36 +46,22 @@ static void mock_timeline_unpin(struct intel_timeline *tl)
 static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 {
 	const unsigned long sz = PAGE_SIZE / 2;
-	struct mock_ring *ring;
+	struct intel_ring *ring;
 
 	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
 	if (!ring)
 		return NULL;
 
-	if (intel_timeline_init(&ring->timeline, engine->gt, NULL)) {
-		kfree(ring);
-		return NULL;
-	}
+	kref_init(&ring->ref);
+	ring->size = sz;
+	ring->effective_size = sz;
+	ring->vaddr = (void *)(ring + 1);
+	atomic_set(&ring->pin_count, 1);
 
-	kref_init(&ring->base.ref);
-	ring->base.size = sz;
-	ring->base.effective_size = sz;
-	ring->base.vaddr = (void *)(ring + 1);
-	ring->base.timeline = &ring->timeline;
-	atomic_set(&ring->base.pin_count, 1);
+	INIT_LIST_HEAD(&ring->request_list);
+	intel_ring_update_space(ring);
 
-	INIT_LIST_HEAD(&ring->base.request_list);
-	intel_ring_update_space(&ring->base);
-
-	return &ring->base;
-}
-
-static void mock_ring_free(struct intel_ring *base)
-{
-	struct mock_ring *ring = container_of(base, typeof(*ring), base);
-
-	intel_timeline_fini(&ring->timeline);
-	kfree(ring);
+	return ring;
 }
 
 static struct i915_request *first_request(struct mock_engine *engine)
@@ -131,7 +112,6 @@ static void hw_delay_complete(struct timer_list *t)
 
 static void mock_context_unpin(struct intel_context *ce)
 {
-	mock_timeline_unpin(ce->ring->timeline);
 }
 
 static void mock_context_destroy(struct kref *ref)
@@ -140,8 +120,10 @@ static void mock_context_destroy(struct kref *ref)
 
 	GEM_BUG_ON(intel_context_is_pinned(ce));
 
-	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
-		mock_ring_free(ce->ring);
+	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+		kfree(ce->ring);
+		mock_timeline_unpin(ce->timeline);
+	}
 
 	intel_context_fini(ce);
 	intel_context_free(ce);
@@ -153,19 +135,21 @@ static int mock_context_alloc(struct intel_context *ce)
 	if (!ce->ring)
 		return -ENOMEM;
 
+	GEM_BUG_ON(ce->timeline);
+	ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
+	if (IS_ERR(ce->timeline)) {
+		kfree(ce->engine);
+		return PTR_ERR(ce->timeline);
+	}
+
+	mock_timeline_pin(ce->timeline);
+
 	return 0;
 }
 
 static int mock_context_pin(struct intel_context *ce)
 {
-	int ret;
-
-	ret = intel_context_active_acquire(ce);
-	if (ret)
-		return ret;
-
-	mock_timeline_pin(ce->ring->timeline);
-	return 0;
+	return intel_context_active_acquire(ce);
 }
 
 static const struct intel_context_ops mock_context_ops = {
drivers/gpu/drm/i915/gt/selftest_context.c

@@ -32,7 +32,7 @@ static int request_sync(struct i915_request *rq)
 
 static int context_sync(struct intel_context *ce)
 {
-	struct intel_timeline *tl = ce->ring->timeline;
+	struct intel_timeline *tl = ce->timeline;
 	int err = 0;
 
 	do {
drivers/gpu/drm/i915/i915_active.c

@@ -246,7 +246,7 @@ static bool __active_del_barrier(struct i915_active *ref,
 	struct llist_node *head = NULL, *tail = NULL;
 	struct llist_node *pos, *next;
 
-	GEM_BUG_ON(node->timeline != engine->kernel_context->ring->timeline->fence_context);
+	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
 
 	/*
 	 * Rebuild the llist excluding our node. We may perform this
@@ -568,7 +568,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 	 * i915_active_acquire_barrier()
 	 */
 	for_each_engine_masked(engine, i915, mask, tmp) {
-		u64 idx = engine->kernel_context->ring->timeline->fence_context;
+		u64 idx = engine->kernel_context->timeline->fence_context;
 		struct active_node *node;
 
 		node = reuse_idle_barrier(ref, idx);
@@ -665,7 +665,7 @@ void i915_request_add_active_barriers(struct i915_request *rq)
 	struct llist_node *node, *next;
 
 	GEM_BUG_ON(intel_engine_is_virtual(engine));
-	GEM_BUG_ON(rq->timeline != engine->kernel_context->ring->timeline);
+	GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
 
 	/*
 	 * Attach the list of proto-fences to the in-flight request such
drivers/gpu/drm/i915/i915_request.c

@@ -306,12 +306,12 @@ static bool i915_request_retire(struct i915_request *rq)
 
 	local_irq_enable();
 
-	intel_context_exit(rq->hw_context);
-	intel_context_unpin(rq->hw_context);
-
 	i915_request_remove_from_client(rq);
 	list_del(&rq->link);
 
+	intel_context_exit(rq->hw_context);
+	intel_context_unpin(rq->hw_context);
+
 	free_capture_list(rq);
 	i915_sched_node_fini(&rq->sched);
 	i915_request_put(rq);
@@ -608,7 +608,7 @@ out:
 struct i915_request *
 __i915_request_create(struct intel_context *ce, gfp_t gfp)
 {
-	struct intel_timeline *tl = ce->ring->timeline;
+	struct intel_timeline *tl = ce->timeline;
 	struct i915_request *rq;
 	u32 seqno;
 	int ret;
@@ -760,7 +760,7 @@ i915_request_create(struct intel_context *ce)
 		goto err_unlock;
 
 	/* Check that we do not interrupt ourselves with a new request */
-	rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);
 
 	return rq;
 
drivers/gpu/drm/i915/selftests/i915_gem_evict.c

@@ -48,26 +48,29 @@ static int populate_ggtt(struct drm_i915_private *i915,
 {
 	unsigned long unbound, bound, count;
 	struct drm_i915_gem_object *obj;
-	u64 size;
 
 	count = 0;
-	for (size = 0;
-	     size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-	     size += I915_GTT_PAGE_SIZE) {
+	do {
 		struct i915_vma *vma;
 
 		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
 		if (IS_ERR(obj))
 			return PTR_ERR(obj);
 
-		quirk_add(obj, objects);
-
 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+		if (IS_ERR(vma)) {
+			i915_gem_object_put(obj);
+			if (vma == ERR_PTR(-ENOSPC))
+				break;
 
+			return PTR_ERR(vma);
+		}
+
+		quirk_add(obj, objects);
 		count++;
-	}
+	} while (1);
+	pr_debug("Filled GGTT with %lu pages [%llu total]\n",
+		 count, i915->ggtt.vm.total / PAGE_SIZE);
 
 	bound = 0;
 	unbound = 0;