2018-04-10 09:12:46 -07:00
|
|
|
/*
|
|
|
|
* SPDX-License-Identifier: MIT
|
|
|
|
*
|
|
|
|
* Copyright © 2014-2018 Intel Corporation
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "i915_drv.h"
|
2019-05-28 10:29:49 +01:00
|
|
|
#include "intel_context.h"
|
2019-11-25 10:58:56 +00:00
|
|
|
#include "intel_engine_pm.h"
|
2020-12-16 13:54:52 +00:00
|
|
|
#include "intel_gpu_commands.h"
|
2019-06-21 08:07:48 +01:00
|
|
|
#include "intel_gt.h"
|
2019-10-24 11:03:44 +01:00
|
|
|
#include "intel_ring.h"
|
2018-04-10 09:12:46 -07:00
|
|
|
#include "intel_workarounds.h"
|
|
|
|
|
|
|
|
/**
|
|
|
|
* DOC: Hardware workarounds
|
|
|
|
*
|
|
|
|
* This file is intended as a central place to implement most [1]_ of the
|
|
|
|
* required workarounds for hardware to work as originally intended. They fall
|
|
|
|
* in five basic categories depending on how/when they are applied:
|
|
|
|
*
|
|
|
|
* - Workarounds that touch registers that are saved/restored to/from the HW
|
|
|
|
* context image. The list is emitted (via Load Register Immediate commands)
|
|
|
|
* everytime a new context is created.
|
|
|
|
* - GT workarounds. The list of these WAs is applied whenever these registers
|
|
|
|
* revert to default values (on GPU reset, suspend/resume [2]_, etc..).
|
|
|
|
* - Display workarounds. The list is applied during display clock-gating
|
|
|
|
* initialization.
|
|
|
|
* - Workarounds that whitelist a privileged register, so that UMDs can manage
|
|
|
|
* them directly. This is just a special case of a MMMIO workaround (as we
|
|
|
|
* write the list of these to/be-whitelisted registers to some special HW
|
|
|
|
* registers).
|
|
|
|
* - Workaround batchbuffers, that get executed automatically by the hardware
|
|
|
|
* on every HW context restore.
|
|
|
|
*
|
|
|
|
* .. [1] Please notice that there are other WAs that, due to their nature,
|
|
|
|
* cannot be applied from a central place. Those are peppered around the rest
|
|
|
|
* of the code, as needed.
|
|
|
|
*
|
|
|
|
* .. [2] Technically, some registers are powercontext saved & restored, so they
|
|
|
|
* survive a suspend/resume. In practice, writing them again is not too
|
|
|
|
* costly and simplifies things. We can revisit this in the future.
|
|
|
|
*
|
|
|
|
* Layout
|
2019-05-23 10:06:46 -06:00
|
|
|
* ~~~~~~
|
2018-04-10 09:12:46 -07:00
|
|
|
*
|
|
|
|
* Keep things in this file ordered by WA type, as per the above (context, GT,
|
|
|
|
* display, register whitelist, batchbuffer). Then, inside each type, keep the
|
|
|
|
* following order:
|
|
|
|
*
|
|
|
|
* - Infrastructure functions and macros
|
|
|
|
* - WAs per platform in standard gen/chrono order
|
|
|
|
* - Public functions to init or apply the given workaround type.
|
|
|
|
*/
|
|
|
|
|
2020-08-10 20:21:05 -07:00
|
|
|
/*
|
|
|
|
* KBL revision ID ordering is bizarre; higher revision ID's map to lower
|
|
|
|
* steppings in some cases. So rather than test against the revision ID
|
|
|
|
* directly, let's map that into our own range of increasing ID's that we
|
|
|
|
* can test against in a regular manner.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Indexed by PCI revision ID. Note the non-monotonic mapping (see the
 * comment above): e.g. revid 5 maps back to GT stepping C0.
 */
const struct i915_rev_steppings kbl_revids[] = {
	[0] = { .gt_stepping = KBL_REVID_A0, .disp_stepping = KBL_REVID_A0 },
	[1] = { .gt_stepping = KBL_REVID_B0, .disp_stepping = KBL_REVID_B0 },
	[2] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B0 },
	[3] = { .gt_stepping = KBL_REVID_D0, .disp_stepping = KBL_REVID_B0 },
	[4] = { .gt_stepping = KBL_REVID_F0, .disp_stepping = KBL_REVID_C0 },
	[5] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B1 },
	[6] = { .gt_stepping = KBL_REVID_D1, .disp_stepping = KBL_REVID_B1 },
	[7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 },
};
|
|
|
|
|
2021-01-19 11:29:30 -08:00
|
|
|
/* TGL U/Y SKUs: PCI revision ID -> GT/display stepping. */
const struct i915_rev_steppings tgl_uy_revid_step_tbl[] = {
	[0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A0 },
	[1] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_C0 },
	[2] = { .gt_stepping = STEP_B1, .disp_stepping = STEP_C0 },
	[3] = { .gt_stepping = STEP_C0, .disp_stepping = STEP_D0 },
};
|
|
|
|
|
|
|
|
/* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */
const struct i915_rev_steppings tgl_revid_step_tbl[] = {
	[0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_B0 },
	[1] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_D0 },
};
|
|
|
|
|
2021-01-19 11:29:31 -08:00
|
|
|
/*
 * ADL-S: PCI revision ID -> GT/display stepping. Indices are sparse
 * (0x2-0x3, 0x5-0x7, 0x9-0xB are unused); unused slots are
 * zero-initialized — NOTE(review): presumably those revids never ship,
 * verify lookup callers bounds/validity-check before indexing.
 */
const struct i915_rev_steppings adls_revid_step_tbl[] = {
	[0x0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A0 },
	[0x1] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A2 },
	[0x4] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_B0 },
	[0x8] = { .gt_stepping = STEP_C0, .disp_stepping = STEP_B0 },
	[0xC] = { .gt_stepping = STEP_D0, .disp_stepping = STEP_C0 },
};
|
|
|
|
|
2019-07-12 00:07:45 -07:00
|
|
|
static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
|
2018-12-03 13:33:19 +00:00
|
|
|
{
|
|
|
|
wal->name = name;
|
2019-07-12 00:07:45 -07:00
|
|
|
wal->engine_name = engine_name;
|
2018-12-03 13:33:19 +00:00
|
|
|
}
|
|
|
|
|
2018-12-03 12:50:14 +00:00
|
|
|
#define WA_LIST_CHUNK (1 << 4)
|
|
|
|
|
2018-12-03 13:33:19 +00:00
|
|
|
static void wa_init_finish(struct i915_wa_list *wal)
|
|
|
|
{
|
2018-12-03 12:50:14 +00:00
|
|
|
/* Trim unused entries. */
|
|
|
|
if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
|
|
|
|
struct i915_wa *list = kmemdup(wal->list,
|
|
|
|
wal->count * sizeof(*list),
|
|
|
|
GFP_KERNEL);
|
|
|
|
|
|
|
|
if (list) {
|
|
|
|
kfree(wal->list);
|
|
|
|
wal->list = list;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-03 13:33:19 +00:00
|
|
|
if (!wal->count)
|
|
|
|
return;
|
|
|
|
|
2019-07-12 00:07:45 -07:00
|
|
|
DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
|
|
|
|
wal->wa_count, wal->name, wal->engine_name);
|
2018-12-03 13:33:19 +00:00
|
|
|
}
|
|
|
|
|
2018-12-03 13:33:57 +00:00
|
|
|
/*
 * Insert @wa into @wal, keeping the list sorted by mmio offset.
 * If an entry for the same register already exists, the new wa is merged
 * into it (conflicting clr/set combinations are reported and resolved in
 * favour of the new entry). Allocation failure drops the wa with an error.
 */
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	/* Grow the array in power-of-two chunks to amortize reallocation. */
	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			DRM_ERROR("No space for workaround init!\n");
			return;
		}

		if (wal->list) {
			memcpy(list, wal->list, sizeof(*wa) * wal->count);
			kfree(wal->list);
		}

		wal->list = list;
	}

	/* Binary search for an existing entry with the same mmio offset. */
	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			/* Duplicate register: merge into the existing entry. */
			wa_ = &wal->list[mid];

			/*
			 * The new clr mask wipes bits the earlier wa set;
			 * warn and drop those bits from the old entry.
			 */
			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
				DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
					  i915_mmio_reg_offset(wa_->reg),
					  wa_->clr, wa_->set);

				wa_->set &= ~wa->clr;
			}

			wal->wa_count++;
			wa_->set |= wa->set;
			wa_->clr |= wa->clr;
			wa_->read |= wa->read;
			return;
		}
	}

	/* Not found: append, then bubble into sorted position. */
	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}
|
|
|
|
|
2020-01-31 23:50:35 +00:00
|
|
|
/*
 * Queue a workaround for @reg: clear @clear, set @set, and verify the
 * @read_mask bits on readback.
 */
static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
		   u32 clear, u32 set, u32 read_mask)
{
	_wa_add(wal, &(struct i915_wa){
		.reg = reg,
		.clr = clear,
		.set = set,
		.read = read_mask,
	});
}
|
|
|
|
|
2019-11-28 07:40:05 +05:30
|
|
|
static void
|
drm/i915/gt: rename wa_write_masked_or()
The use of "masked" in this function is due to its history. Once upon a
time it received a mask and a value as parameter. Since
commit eeec73f8a4a4 ("drm/i915/gt: Skip rmw for masked registers")
that is not true anymore and now there is a clear and a set parameter.
Depending on the case, that can still be thought as a mask and value,
but there are some subtle differences: what we clear doesn't need to be
the same bits we are setting, particularly when we are using masked
registers.
The fact that we also have "masked registers", i.e. registers whose mask
is stored in the upper 16 bits of the register, makes it even more
confusing, because "masked" in wa_write_masked_or() has little to do
with masked registers, but rather refers to the old mask parameter the
function received (that can also, but not exclusively, be used to write
to masked register).
Avoid the ambiguity and misnomer by renaming it to something else,
hopefully less confusing: wa_write_clr_set(), to designate that we are
doing both clr and set operations in the register.
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201209045246.2905675-2-lucas.demarchi@intel.com
2020-12-08 20:52:45 -08:00
|
|
|
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
|
2019-11-28 07:40:05 +05:30
|
|
|
{
|
2020-01-31 23:50:35 +00:00
|
|
|
wa_add(wal, reg, clear, set, clear);
|
2019-11-28 07:40:05 +05:30
|
|
|
}
|
|
|
|
|
2019-01-31 17:08:42 -08:00
|
|
|
static void
|
2020-01-31 23:50:35 +00:00
|
|
|
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
|
|
|
|
{
|
drm/i915/gt: rename wa_write_masked_or()
The use of "masked" in this function is due to its history. Once upon a
time it received a mask and a value as parameter. Since
commit eeec73f8a4a4 ("drm/i915/gt: Skip rmw for masked registers")
that is not true anymore and now there is a clear and a set parameter.
Depending on the case, that can still be thought as a mask and value,
but there are some subtle differences: what we clear doesn't need to be
the same bits we are setting, particularly when we are using masked
registers.
The fact that we also have "masked registers", i.e. registers whose mask
is stored in the upper 16 bits of the register, makes it even more
confusing, because "masked" in wa_write_masked_or() has little to do
with masked registers, but rather refers to the old mask parameter the
function received (that can also, but not exclusively, be used to write
to masked register).
Avoid the ambiguity and misnomer by renaming it to something else,
hopefully less confusing: wa_write_clr_set(), to designate that we are
doing both clr and set operations in the register.
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201209045246.2905675-2-lucas.demarchi@intel.com
2020-12-08 20:52:45 -08:00
|
|
|
wa_write_clr_set(wal, reg, ~0, set);
|
2020-01-31 23:50:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
|
2019-01-31 17:08:42 -08:00
|
|
|
{
|
drm/i915/gt: rename wa_write_masked_or()
The use of "masked" in this function is due to its history. Once upon a
time it received a mask and a value as parameter. Since
commit eeec73f8a4a4 ("drm/i915/gt: Skip rmw for masked registers")
that is not true anymore and now there is a clear and a set parameter.
Depending on the case, that can still be thought as a mask and value,
but there are some subtle differences: what we clear doesn't need to be
the same bits we are setting, particularly when we are using masked
registers.
The fact that we also have "masked registers", i.e. registers whose mask
is stored in the upper 16 bits of the register, makes it even more
confusing, because "masked" in wa_write_masked_or() has little to do
with masked registers, but rather refers to the old mask parameter the
function received (that can also, but not exclusively, be used to write
to masked register).
Avoid the ambiguity and misnomer by renaming it to something else,
hopefully less confusing: wa_write_clr_set(), to designate that we are
doing both clr and set operations in the register.
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201209045246.2905675-2-lucas.demarchi@intel.com
2020-12-08 20:52:45 -08:00
|
|
|
wa_write_clr_set(wal, reg, set, set);
|
2019-01-31 17:08:42 -08:00
|
|
|
}
|
|
|
|
|
2020-06-11 10:30:15 +01:00
|
|
|
static void
|
|
|
|
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
|
|
|
|
{
|
drm/i915/gt: rename wa_write_masked_or()
The use of "masked" in this function is due to its history. Once upon a
time it received a mask and a value as parameter. Since
commit eeec73f8a4a4 ("drm/i915/gt: Skip rmw for masked registers")
that is not true anymore and now there is a clear and a set parameter.
Depending on the case, that can still be thought as a mask and value,
but there are some subtle differences: what we clear doesn't need to be
the same bits we are setting, particularly when we are using masked
registers.
The fact that we also have "masked registers", i.e. registers whose mask
is stored in the upper 16 bits of the register, makes it even more
confusing, because "masked" in wa_write_masked_or() has little to do
with masked registers, but rather refers to the old mask parameter the
function received (that can also, but not exclusively, be used to write
to masked register).
Avoid the ambiguity and misnomer by renaming it to something else,
hopefully less confusing: wa_write_clr_set(), to designate that we are
doing both clr and set operations in the register.
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201209045246.2905675-2-lucas.demarchi@intel.com
2020-12-08 20:52:45 -08:00
|
|
|
wa_write_clr_set(wal, reg, clr, 0);
|
2020-06-11 10:30:15 +01:00
|
|
|
}
|
|
|
|
|
2020-12-08 20:52:46 -08:00
|
|
|
/*
|
|
|
|
* WA operations on "masked register". A masked register has the upper 16 bits
|
|
|
|
* documented as "masked" in b-spec. Its purpose is to allow writing to just a
|
|
|
|
* portion of the register without a rmw: you simply write in the upper 16 bits
|
|
|
|
* the mask of bits you are going to modify.
|
|
|
|
*
|
|
|
|
* The wa_masked_* family of functions already does the necessary operations to
|
|
|
|
* calculate the mask based on the parameters passed, so user only has to
|
|
|
|
* provide the lower 16 bits of that register.
|
|
|
|
*/
|
|
|
|
|
2019-01-31 17:08:42 -08:00
|
|
|
static void
|
2020-01-31 23:50:35 +00:00
|
|
|
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
|
2019-01-31 17:08:42 -08:00
|
|
|
{
|
2020-01-31 23:50:35 +00:00
|
|
|
wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
|
2019-01-31 17:08:42 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-01-31 23:50:35 +00:00
|
|
|
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
|
2019-01-31 17:08:42 -08:00
|
|
|
{
|
2020-01-31 23:50:35 +00:00
|
|
|
wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
|
2019-01-31 17:08:42 -08:00
|
|
|
}
|
|
|
|
|
2020-12-05 01:25:42 -08:00
|
|
|
static void
|
|
|
|
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
|
|
|
|
u32 mask, u32 val)
|
|
|
|
{
|
2020-12-08 20:52:44 -08:00
|
|
|
wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask);
|
2020-12-05 01:25:42 -08:00
|
|
|
}
|
2018-04-10 09:12:46 -07:00
|
|
|
|
2020-06-01 08:24:13 +01:00
|
|
|
/* Context workarounds common to all gen6 engines. */
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
|
|
|
|
|
|
|
|
/* Context workarounds common to all gen7 engines. */
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/* Context workarounds common to gen8 (BDW and CHV). */
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	wa_masked_en(wal, GEN8_ROW_CHICKEN,
		     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
		     HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/* Broadwell-specific context workarounds, on top of the common gen8 set. */
static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	wa_masked_en(wal, GEN7_ROW_CHICKEN2,
		     DOP_CLOCK_GATING_DISABLE);

	wa_masked_en(wal, HALF_SLICE_CHICKEN3,
		     GEN8_SAMPLER_POWER_BYPASS_DIS);

	wa_masked_en(wal, HDC_CHICKEN0,
		     /* WaForceContextSaveRestoreNonCoherent:bdw */
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
		     (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/* Cherryview-specific context workarounds, on top of the common gen8 set. */
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/* Context workarounds common to all gen9 platforms (skl,bxt,kbl,glk,cfl). */
static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
		wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
			     GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	wa_masked_en(wal, GEN8_ROW_CHICKEN,
		     FLOW_CONTROL_ENABLE |
		     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
		     GEN9_ENABLE_YV12_BUGFIX |
		     GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	wa_masked_en(wal, CACHE_MODE_1,
		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
		      GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		wa_masked_en(wal, HALF_SLICE_CHICKEN3,
			     GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to safe value. Userspace is
	 * still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/*
 * Tune IZ hashing per slice based on the fused-out subslice layout:
 * for each slice with exactly one 7-EU subslice, bias the hashing away
 * from that subslice. No-op if no slice needs adjusting.
 */
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/* Skylake context workarounds: common gen9 set plus IZ hash tuning. */
static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}
|
2018-04-10 09:12:46 -07:00
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/* Broxton-specific context workarounds, on top of the common gen9 set. */
static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	wa_masked_en(wal, GEN8_ROW_CHICKEN,
		     STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/* Kabylake-specific context workarounds, on top of the common gen9 set. */
static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	/* Only needed from GT stepping C0 onwards. */
	if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
		     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/* Geminilake-specific context workarounds, on top of the common gen9 set. */
static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/* Coffeelake-specific context workarounds, on top of the common gen9 set. */
static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
		     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/*
 * Cannonlake context workarounds. Unlike the gen9 platforms, CNL does not
 * inherit a common base list; every entry is registered here directly.
 */
static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/* WaForceContextSaveRestoreNonCoherent:cnl */
	wa_masked_en(wal, CNL_HDC_CHICKEN0,
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaPushConstantDereferenceHoldDisable:cnl */
	wa_masked_en(wal, GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	wa_masked_en(wal, HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	/* Restrict GPGPU preemption granularity to command-level. */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaDisableEarlyEOT:cnl */
	wa_masked_en(wal, GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/*
 * Icelake (gen11) context workarounds. Several entries are pre-production
 * only and gated on the ICL stepping; a couple are shared with EHL.
 */
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaDisableBankHangMode:icl */
	/*
	 * RMW done manually here: read the current register value through
	 * the uncore and OR in the bit, since wa_write() stores an absolute
	 * value to be re-applied.
	 */
	wa_write(wal,
		 GEN8_L3CNTLREG,
		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
		 GEN8_ERRDETBCTRL);

	/* Wa_1604370585:icl (pre-prod)
	 * Formerly known as WaPushConstantDereferenceHoldDisable
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
			     PUSH_CONSTANT_DEREF_DISABLE);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* Wa_2006611047:icl (pre-prod)
	 * Formerly known as WaDisableImprovedTdlClkGating
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
			     GEN11_TDL_CLOCK_GATING_FIX_DISABLE);

	/* Wa_2006665173:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
			     GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);

	/* WaEnableFloatBlendOptimization:icl */
	wa_write_clr_set(wal,
			 GEN10_CACHE_MODE_SS,
			 0, /* write-only, so skip validation */
			 _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));

	/* WaDisableGPGPUMidThreadPreemption:icl */
	/* Limit GPGPU preemption granularity to thread-group level. */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	wa_masked_en(wal, GEN10_SAMPLER_MODE,
		     GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

	/* Wa_1604278689:icl,ehl */
	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
			 0, /* write-only register; skip validation */
			 0xFFFFFFFF);

	/* Wa_1406306137:icl,ehl */
	wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
|
|
|
|
|
2020-07-16 15:05:48 -07:00
|
|
|
/*
 * Workarounds common to all gen12 platforms (TGL, RKL, DG1, ADL-S, ...).
 * Platform-specific gen12 functions below call this first and then add
 * their own entries.
 */
static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	/*
	 * Wa_1409142259:tgl
	 * Wa_1409347922:tgl
	 * Wa_1409252684:tgl
	 * Wa_1409217633:tgl
	 * Wa_1409207793:tgl
	 * Wa_1409178076:tgl
	 * Wa_1408979724:tgl
	 * Wa_14010443199:rkl
	 * Wa_14010698770:rkl
	 */
	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/* WaDisableGPGPUMidThreadPreemption:gen12 */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
|
|
|
|
|
|
|
|
/*
 * Tigerlake context workarounds: the gen12 common set plus the FF_MODE2
 * timer programming. Also used for RKL and ADL-S via the dispatch in
 * __intel_engine_init_ctx_wa().
 */
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen12_ctx_workarounds_init(engine, wal);

	/*
	 * Wa_1604555607:tgl,rkl
	 *
	 * Note that the implementation of this workaround is further modified
	 * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
	 * FF_MODE2 register will return the wrong value when read. The default
	 * value for this register is zero for all fields and there are no bit
	 * masks. So instead of doing a RMW we should just write the GS Timer
	 * and TDS timer values for Wa_1604555607 and Wa_16011163337.
	 */
	/* Final 0 arg = no read-back mask: verification skipped (Wa_1608008084). */
	wa_add(wal,
	       FF_MODE2,
	       FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK,
	       FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128,
	       0);
}
|
|
|
|
|
2020-10-14 12:19:34 -07:00
|
|
|
/*
 * DG1 context workarounds: gen12 common set plus DG1-specific entries.
 */
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen12_ctx_workarounds_init(engine, wal);

	/* Wa_1409044764 */
	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

	/* Wa_22010493298 */
	wa_masked_en(wal, HIZ_CHICKEN,
		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);

	/*
	 * Wa_16011163337
	 *
	 * Like in tgl_ctx_workarounds_init(), read verification is ignored due
	 * to Wa_1608008084.
	 */
	wa_add(wal,
	       FF_MODE2,
	       FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/*
 * Build the per-context workaround list @wal (labelled @name) for @engine.
 * Context workarounds only apply to the render engine; all other engine
 * classes return immediately with an empty list. Platforms are dispatched
 * from newest to oldest so the more specific check wins.
 */
static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class != RENDER_CLASS)
		return;

	wa_init_start(wal, name, engine->name);

	if (IS_DG1(i915))
		dg1_ctx_workarounds_init(engine, wal);
	else if (IS_ALDERLAKE_S(i915) || IS_ROCKETLAKE(i915) ||
		 IS_TIGERLAKE(i915))
		tgl_ctx_workarounds_init(engine, wal);
	else if (IS_GEN(i915, 12))
		gen12_ctx_workarounds_init(engine, wal);
	else if (IS_GEN(i915, 11))
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (IS_GEN(i915, 7))
		gen7_ctx_workarounds_init(engine, wal);
	else if (IS_GEN(i915, 6))
		gen6_ctx_workarounds_init(engine, wal);
	else if (INTEL_GEN(i915) < 8)
		/*
		 * NOTE(review): gen < 6 bails out here without calling
		 * wa_init_finish() on the started list — confirm this is
		 * intentional (list stays empty, so likely benign).
		 */
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(wal);
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/*
 * Public entry point: initialise the engine's context workaround list
 * stored in engine->ctx_wa_list.
 */
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}
|
|
|
|
|
2018-12-03 13:33:57 +00:00
|
|
|
/*
 * Emit the engine's context workaround list into @rq's ring as a single
 * MI_LOAD_REGISTER_IMM packet, bracketed by flushes so the writes land
 * before/after surrounding commands.
 *
 * Returns 0 on success (including the empty-list case) or a negative
 * errno from emit_flush()/intel_ring_begin().
 */
int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	/* Nothing to emit for engines/platforms without context workarounds. */
	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	/* 2 dwords per register (offset + value), plus LRI header and NOOP. */
	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = wa->set;
	}
	*cs++ = MI_NOOP;	/* pad to the even dword count reserved above */

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
|
|
|
|
|
2020-06-11 09:01:39 +01:00
|
|
|
/*
 * GT (global, non-context) workarounds for gen4; inherited by g4x/ilk.
 */
static void
gen4_gt_workarounds_init(struct drm_i915_private *i915,
			 struct i915_wa_list *wal)
{
	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}
|
|
|
|
|
|
|
|
/*
 * G4x GT workarounds: gen4 set plus the pipelined-render-flush disable.
 */
static void
g4x_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen4_gt_workarounds_init(i915, wal);

	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}
|
2020-06-11 09:01:39 +01:00
|
|
|
|
2020-06-11 09:01:40 +01:00
|
|
|
/*
 * Ironlake GT workarounds: g4x set plus pipelined WM reads.
 */
static void
ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	g4x_gt_workarounds_init(i915, wal);

	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}
|
|
|
|
|
2020-06-11 09:01:38 +01:00
|
|
|
/*
 * Sandybridge GT workarounds: intentionally empty — SNB currently has no
 * GT-level workarounds registered here (kept as a dispatch placeholder).
 */
static void
snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
}
|
|
|
|
|
2020-06-11 09:01:36 +01:00
|
|
|
/*
 * Ivybridge GT workarounds.
 */
static void
ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	wa_masked_dis(wal,
		      GEN7_COMMON_SLICE_CHICKEN1,
		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* WaForceL3Serialization:ivb */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}
|
|
|
|
|
2020-06-11 09:01:37 +01:00
|
|
|
/*
 * Valleyview GT workarounds.
 */
static void
vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* WaForceL3Serialization:vlv */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}
|
|
|
|
|
2020-06-11 10:30:15 +01:00
|
|
|
/*
 * Haswell GT workarounds.
 */
static void
hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

	/* Read-back mask of 0 skips verification of this register. */
	wa_add(wal,
	       HSW_ROW_CHICKEN3, 0,
	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	       0 /* XXX does this reg exist? */);

	/* WaVSRefCountFullforceMissDisable:hsw */
	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * GT workarounds common to all gen9 platforms (SKL, BXT, KBL, GLK, CFL).
 * Platform functions below call this first and then add their own entries.
 */
static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* WaDisableKillLogic:bxt,skl,kbl */
	/* Not applicable to CFL/CML, hence the exclusion. */
	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * Skylake GT workarounds: gen9 common set plus SKL-specific entries.
 */
static void
skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	/* Only needed from stepping H0 onwards. */
	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * Broxton GT workarounds: gen9 common set plus the in-place decompression
 * hang fix.
 */
static void
bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaInPlaceDecompressionHang:bxt */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * Kabylake GT workarounds: gen9 common set plus KBL-specific entries.
 */
static void
kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	/* Only needed up to (and including) GT stepping B0. */
	if (IS_KBL_GT_REVID(i915, 0, KBL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
|
2018-04-10 09:12:46 -07:00
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * Geminilake GT workarounds: only the gen9 common set.
 */
static void
glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * Coffeelake GT workarounds: gen9 common set plus CFL-specific entries.
 */
static void
cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
|
2018-04-10 09:12:46 -07:00
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * Program GEN8_MCR_SELECTOR so that slice/subslice-specific (and L3-bank-
 * specific) MMIO reads are steered to an enabled s/ss pair. Gen10+ only
 * (enforced by the GEM_BUG_ON below).
 */
static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
	unsigned int slice, subslice;
	u32 l3_en, mcr, mcr_mask;

	GEM_BUG_ON(INTEL_GEN(i915) < 10);

	/*
	 * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
	 * L3Banks could be fused off in single slice scenario. If that is
	 * the case, we might need to program MCR select to a valid L3Bank
	 * by default, to make sure we correctly read certain registers
	 * later on (in the range 0xB100 - 0xB3FF).
	 *
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to an
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. In the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 *
	 * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both
	 * to which subslice, or to which L3 bank, the respective mmio reads
	 * will go, we have to find a common index which works for both
	 * accesses.
	 *
	 * Case where we cannot find a common index fortunately should not
	 * happen in production hardware, so we only emit a warning instead of
	 * implementing something more complex that requires checking the range
	 * of every MMIO read.
	 */

	if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
		/* Derive the enabled-L3-bank mask from the fuse register. */
		u32 l3_fuse =
			intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
			GEN10_L3BANK_MASK;

		drm_dbg(&i915->drm, "L3 fuse = %x\n", l3_fuse);
		l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
	} else {
		/* Multiple slices: treat all L3 banks as usable. */
		l3_en = ~0;
	}

	slice = fls(sseu->slice_mask) - 1;
	subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
	if (!subslice) {
		/* No common index; warn and fall back to any enabled L3 bank. */
		drm_warn(&i915->drm,
			 "No common index found between subslice mask %x and L3 bank mask %x!\n",
			 intel_sseu_get_subslices(sseu, slice), l3_en);
		subslice = fls(l3_en);
		drm_WARN_ON(&i915->drm, !subslice);
	}
	subslice--;	/* fls() is 1-based; the MCR field is 0-based. */

	/* Gen11+ uses different MCR slice/subslice field encodings than gen8/10. */
	if (INTEL_GEN(i915) >= 11) {
		mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
	} else {
		mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
	}

	drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);

	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * Cannonlake GT workarounds: MCR steering setup plus the in-place
 * decompression hang fix.
 */
static void
cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaInPlaceDecompressionHang:cnl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * GT (non-context) workarounds for Icelake (and, for the last entry,
 * Jasperlake/Elkhartlake pre-production steppings).
 *
 * Order matters: MCR steering is set up first so that subsequent
 * register reads/writes are steered to a valid slice/subslice.
 */
static void
icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaInPlaceDecompressionHang:icl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_clr_set(wal,
			 GEN11_GACB_PERF_CTRL,
			 GEN11_HASH_CTRL_MASK,
			 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1405779004:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    MSCUNIT_CLKGATE_DIS);

	/* Wa_1406838659:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		wa_write_or(wal,
			    INF_UNIT_LEVEL_CLKGATE,
			    CGPSF_CLKGATE_DIS);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);

	/* Wa_1607087056:icl,ehl,jsl */
	if (IS_ICELAKE(i915) ||
	    IS_JSL_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) {
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
	}
}
|
|
|
|
|
2019-08-17 02:38:42 -07:00
|
|
|
/*
 * Common GT workarounds for all gen12 platforms; platform-specific
 * entries (TGL, DG1) chain through this before adding their own.
 */
static void
gen12_gt_workarounds_init(struct drm_i915_private *i915,
			  struct i915_wa_list *wal)
{
	/* Only the MCR steering setup is common to all gen12 so far. */
	wa_init_mcr(i915, wal);
}
|
|
|
|
|
|
|
|
/*
 * GT (non-context) workarounds for Tigerlake. Builds on the common
 * gen12 list; the remaining entries are all A0-stepping-only.
 */
static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen12_gt_workarounds_init(i915, wal);

	/* Wa_1409420604:tgl */
	if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0))
		wa_write_or(wal,
			    SUBSLICE_UNIT_LEVEL_CLKGATE2,
			    CPSSUNIT_CLKGATE_DIS);

	/* Wa_1607087056:tgl also know as BUG:1409180338 */
	if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

	/* Wa_1408615072:tgl[a0] */
	if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0))
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
			    VSUNIT_CLKGATE_DIS_TGL);
}
|
|
|
|
|
2020-10-14 12:19:34 -07:00
|
|
|
/*
 * GT (non-context) workarounds for DG1. Builds on the common gen12
 * list and then mirrors the TGL clock-gating entries for DG1 steppings.
 */
static void
dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen12_gt_workarounds_init(i915, wal);

	/* Wa_1607087056:dg1 */
	if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

	/* Wa_1409420604:dg1 */
	if (IS_DG1(i915))
		wa_write_or(wal,
			    SUBSLICE_UNIT_LEVEL_CLKGATE2,
			    CPSSUNIT_CLKGATE_DIS);

	/* Wa_1408615072:dg1 */
	/* Empirical testing shows this register is unaffected by engine reset. */
	if (IS_DG1(i915))
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
			    VSUNIT_CLKGATE_DIS_TGL);
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
/*
 * Populate @wal with the GT workarounds for the running platform.
 *
 * The if/else chain is ordered most-specific first (e.g. DG1 before the
 * generic gen12 case, Tigerlake before gen12), so reordering branches
 * would silently change which list a platform receives.
 */
static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	if (IS_DG1(i915))
		dg1_gt_workarounds_init(i915, wal);
	else if (IS_TIGERLAKE(i915))
		tgl_gt_workarounds_init(i915, wal);
	else if (IS_GEN(i915, 12))
		gen12_gt_workarounds_init(i915, wal);
	else if (IS_GEN(i915, 11))
		icl_gt_workarounds_init(i915, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_gt_workarounds_init(i915, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_gt_workarounds_init(i915, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(i915, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(i915, wal);
	else if (IS_BROXTON(i915))
		bxt_gt_workarounds_init(i915, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(i915, wal);
	else if (IS_HASWELL(i915))
		hsw_gt_workarounds_init(i915, wal);
	else if (IS_VALLEYVIEW(i915))
		vlv_gt_workarounds_init(i915, wal);
	else if (IS_IVYBRIDGE(i915))
		ivb_gt_workarounds_init(i915, wal);
	else if (IS_GEN(i915, 6))
		snb_gt_workarounds_init(i915, wal);
	else if (IS_GEN(i915, 5))
		ilk_gt_workarounds_init(i915, wal);
	else if (IS_G4X(i915))
		g4x_gt_workarounds_init(i915, wal);
	else if (IS_GEN(i915, 4))
		gen4_gt_workarounds_init(i915, wal);
	/* Remaining gen <= 8 platforms intentionally have no GT WA list. */
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));
}
|
|
|
|
|
|
|
|
/*
 * Build (but do not apply) the device-global GT workaround list, stored
 * in i915->gt_wa_list. Application happens later via
 * intel_gt_apply_workarounds().
 */
void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT", "global");
	gt_init_workarounds(i915, wal);
	wa_init_finish(wal);
}
|
|
|
|
|
|
|
|
static enum forcewake_domains
|
2019-04-12 21:24:57 +01:00
|
|
|
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
|
2018-12-03 13:33:19 +00:00
|
|
|
{
|
|
|
|
enum forcewake_domains fw = 0;
|
|
|
|
struct i915_wa *wa;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
|
2019-04-12 21:24:57 +01:00
|
|
|
fw |= intel_uncore_forcewake_for_reg(uncore,
|
2018-12-03 13:33:19 +00:00
|
|
|
wa->reg,
|
|
|
|
FW_REG_READ |
|
|
|
|
FW_REG_WRITE);
|
|
|
|
|
|
|
|
return fw;
|
|
|
|
}
|
|
|
|
|
2019-04-17 08:56:27 +01:00
|
|
|
/*
 * Check that the live register value @cur still carries the workaround:
 * only the bits named in wa->read are compared against wa->set.
 * Returns true when the workaround is intact, false (with an error log)
 * when it has been lost.
 */
static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
	/* XOR then mask: any readable bit differing from the target fails. */
	if ((cur ^ wa->set) & wa->read) {
		DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
			  name, from, i915_mmio_reg_offset(wa->reg),
			  cur, cur & wa->read, wa->set & wa->read);

		return false;
	}

	return true;
}
|
|
|
|
|
2018-12-03 13:33:19 +00:00
|
|
|
/*
 * Write every workaround in @wal to the hardware.
 *
 * All required forcewake domains are taken once up front and the whole
 * list is applied under uncore->lock with the _fw (unlocked) MMIO
 * accessors, keeping the sequence atomic w.r.t. other MMIO users.
 * With CONFIG_DRM_I915_DEBUG_GEM each write is read back and verified.
 */
static void
wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		/* A zero clr mask means a plain write, not a read-modify-write. */
		if (wa->clr)
			intel_uncore_rmw_fw(uncore, wa->reg, wa->clr, wa->set);
		else
			intel_uncore_write_fw(uncore, wa->reg, wa->set);
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
			wa_verify(wa,
				  intel_uncore_read_fw(uncore, wa->reg),
				  wal->name, "application");
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);
}
|
|
|
|
|
2019-06-21 08:07:48 +01:00
|
|
|
/* Apply the previously built device-global GT workaround list to @gt. */
void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
}
|
|
|
|
|
2019-04-12 21:24:57 +01:00
|
|
|
static bool wa_list_verify(struct intel_uncore *uncore,
|
2018-12-03 12:50:10 +00:00
|
|
|
const struct i915_wa_list *wal,
|
|
|
|
const char *from)
|
|
|
|
{
|
|
|
|
struct i915_wa *wa;
|
|
|
|
unsigned int i;
|
|
|
|
bool ok = true;
|
|
|
|
|
|
|
|
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
|
2019-04-12 21:24:57 +01:00
|
|
|
ok &= wa_verify(wa,
|
|
|
|
intel_uncore_read(uncore, wa->reg),
|
|
|
|
wal->name, from);
|
2018-12-03 12:50:10 +00:00
|
|
|
|
|
|
|
return ok;
|
|
|
|
}
|
|
|
|
|
2019-06-21 08:07:48 +01:00
|
|
|
/*
 * Verify the device-global GT workaround list against the hardware.
 * @from names the caller for the error messages. Returns true if intact.
 */
bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}
|
|
|
|
|
2021-01-09 16:34:54 +00:00
|
|
|
__maybe_unused
|
2019-07-12 00:07:43 -07:00
|
|
|
static inline bool is_nonpriv_flags_valid(u32 flags)
|
|
|
|
{
|
|
|
|
/* Check only valid flag bits are set */
|
|
|
|
if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* NB: Only 3 out of 4 enum values are valid for access field */
|
|
|
|
if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
|
|
|
|
RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-12-03 12:50:12 +00:00
|
|
|
/*
 * Append @reg to an engine's user-visible (non-privileged) register
 * whitelist, with @flags encoding the access mode/range in the low bits
 * of the register offset. Silently drops the entry (with a debug WARN)
 * if the slot budget is exhausted or the flags are malformed.
 */
static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	/* Hardware exposes a fixed number of NONPRIV slots per engine. */
	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	/* Flags ride in the low bits of the register offset itself. */
	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}
|
|
|
|
|
2019-06-17 18:01:05 -07:00
|
|
|
/* Whitelist @reg with the default read/write access mode. */
static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
|
|
|
|
|
2018-12-03 12:50:12 +00:00
|
|
|
/*
 * Register whitelist entries common to all gen9 render engines;
 * platform builders (skl/bxt/kbl/glk/cfl) call this before adding
 * their own entries.
 */
static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}
|
|
|
|
|
2019-06-17 18:01:06 -07:00
|
|
|
/* Skylake register whitelist; render engine only. */
static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}
|
|
|
|
|
2019-06-17 18:01:06 -07:00
|
|
|
static void bxt_whitelist_build(struct intel_engine_cs *engine)
|
2018-04-10 09:12:46 -07:00
|
|
|
{
|
2019-06-17 18:01:06 -07:00
|
|
|
if (engine->class != RENDER_CLASS)
|
|
|
|
return;
|
|
|
|
|
|
|
|
gen9_whitelist_build(&engine->whitelist);
|
2018-04-10 09:12:47 -07:00
|
|
|
}
|
|
|
|
|
2019-06-17 18:01:06 -07:00
|
|
|
/* Kabylake register whitelist; render engine only. */
static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}
|
|
|
|
|
2019-06-17 18:01:06 -07:00
|
|
|
/* Geminilake register whitelist; render engine only. */
static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
|
2018-04-10 09:12:46 -07:00
|
|
|
|
2019-06-17 18:01:06 -07:00
|
|
|
/* Coffeelake register whitelist; render engine only. */
static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 register which are next to one another :
	 *   - PS_INVOCATION_COUNT
	 *   - PS_INVOCATION_COUNT_UDW
	 *   - PS_DEPTH_COUNT
	 *   - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}
|
|
|
|
|
2020-06-02 16:48:39 +01:00
|
|
|
/*
 * Cometlake register whitelist.
 *
 * Note the guard is the inverse of the other builders on purpose:
 * NON-render engines get a read-only RING_CTX_TIMESTAMP entry here,
 * while the render path falls through to the Coffeelake builder below
 * (which itself bails for non-render classes).
 */
static void cml_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);

	cfl_whitelist_build(engine);
}
|
|
|
|
|
2019-06-17 18:01:06 -07:00
|
|
|
/* Cannonlake register whitelist; render engine only. */
static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	whitelist_reg(w, GEN8_CS_CHICKEN1);
}
|
|
|
|
|
2019-06-17 18:01:06 -07:00
|
|
|
/*
 * Icelake register whitelist, with per-engine-class entries: render
 * gets UMD-tunable chicken registers and the PS counters, video decode
 * gets read-only HuC status registers, and every class gets a read-only
 * RING_CTX_TIMESTAMP (added in the vdbox and default branches).
 */
static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 register which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;
	}
}
|
|
|
|
|
2019-08-17 02:38:42 -07:00
|
|
|
/*
 * Tigerlake register whitelist: render gets the PS counter block plus
 * two workaround chicken registers; all other classes get a read-only
 * RING_CTX_TIMESTAMP entry.
 */
static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
		 * Wa_1408556865:tgl
		 *
		 * This covers 4 registers which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);

		/* Wa_1808121037:tgl */
		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);

		/* Wa_1806527549:tgl */
		whitelist_reg(w, HIZ_CHICKEN);
		break;
	default:
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;
	}
}
|
|
|
|
|
2020-10-14 12:19:34 -07:00
|
|
|
/*
 * DG1 register whitelist: the Tigerlake set, plus a read-only RING_ID
 * entry on render/copy engines for A0 stepping.
 */
static void dg1_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	tgl_whitelist_build(engine);

	/* GEN:BUG:1409280441:dg1 */
	if (IS_DG1_REVID(engine->i915, DG1_REVID_A0, DG1_REVID_A0) &&
	    (engine->class == RENDER_CLASS ||
	     engine->class == COPY_ENGINE_CLASS))
		whitelist_reg_ext(w, RING_ID(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
}
|
|
|
|
|
2018-12-03 12:50:12 +00:00
|
|
|
/*
 * Build (but do not apply) the per-engine user register whitelist.
 * The platform dispatch is ordered most-specific first (DG1 before the
 * generic gen12 case, Cometlake before Coffeelake), so branch order is
 * load-bearing.
 */
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, "whitelist", engine->name);

	if (IS_DG1(i915))
		dg1_whitelist_build(engine);
	else if (IS_GEN(i915, 12))
		tgl_whitelist_build(engine);
	else if (IS_GEN(i915, 11))
		icl_whitelist_build(engine);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(engine);
	else if (IS_COMETLAKE(i915))
		cml_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	/* Gen <= 8 platforms intentionally have no whitelist. */
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(w);
}
|
|
|
|
|
2018-12-03 12:50:12 +00:00
|
|
|
/*
 * Program the engine's RING_FORCE_TO_NONPRIV slots from its whitelist.
 * Unused slots are explicitly reset to RING_NOPID so stale entries from
 * a previous owner cannot leak privileged registers to userspace.
 */
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
static void
|
|
|
|
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
2018-12-03 13:33:41 +00:00
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = engine->i915;
|
|
|
|
|
2020-10-14 12:19:34 -07:00
|
|
|
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
|
2021-01-19 11:29:30 -08:00
|
|
|
IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) {
|
2020-02-27 14:00:56 -08:00
|
|
|
/*
|
2020-10-14 12:19:34 -07:00
|
|
|
* Wa_1607138336:tgl[a0],dg1[a0]
|
|
|
|
* Wa_1607063988:tgl[a0],dg1[a0]
|
2020-02-27 14:00:56 -08:00
|
|
|
*/
|
2019-10-15 18:44:47 +03:00
|
|
|
wa_write_or(wal,
|
|
|
|
GEN9_CTX_PREEMPT_REG,
|
|
|
|
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
|
2020-10-14 12:19:34 -07:00
|
|
|
}
|
2019-10-15 18:44:48 +03:00
|
|
|
|
2021-01-19 11:29:30 -08:00
|
|
|
if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) {
|
2019-11-13 15:19:53 -08:00
|
|
|
/*
|
|
|
|
* Wa_1606679103:tgl
|
|
|
|
* (see also Wa_1606682166:icl)
|
|
|
|
*/
|
|
|
|
wa_write_or(wal,
|
|
|
|
GEN7_SARCHKMD,
|
|
|
|
GEN7_DISABLE_SAMPLER_PREFETCH);
|
2019-10-15 18:44:43 +03:00
|
|
|
}
|
|
|
|
|
2021-01-29 10:29:45 -08:00
|
|
|
if (IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
|
|
|
|
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
|
|
|
/* Wa_1606931601:tgl,rkl,dg1,adl-s */
|
2020-02-27 14:00:54 -08:00
|
|
|
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
|
|
|
|
|
2020-10-14 12:19:34 -07:00
|
|
|
/*
|
|
|
|
* Wa_1407928979:tgl A*
|
|
|
|
* Wa_18011464164:tgl[B0+],dg1[B0+]
|
|
|
|
* Wa_22010931296:tgl[B0+],dg1[B0+]
|
2021-01-29 10:29:45 -08:00
|
|
|
* Wa_14010919138:rkl,dg1,adl-s
|
2020-10-14 12:19:34 -07:00
|
|
|
*/
|
|
|
|
wa_write_or(wal, GEN7_FF_THREAD_MODE,
|
|
|
|
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
|
2020-10-26 21:32:28 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Wa_1606700617:tgl,dg1
|
2021-01-29 10:29:45 -08:00
|
|
|
* Wa_22010271021:tgl,rkl,dg1, adl-s
|
2020-10-26 21:32:28 -07:00
|
|
|
*/
|
|
|
|
wa_masked_en(wal,
|
|
|
|
GEN9_CS_DEBUG_MODE1,
|
|
|
|
FF_DOP_CLOCK_GATE_DISABLE);
|
2020-10-14 12:19:34 -07:00
|
|
|
}
|
|
|
|
|
2021-01-29 10:29:45 -08:00
|
|
|
if (IS_ALDERLAKE_S(i915) || IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
|
2020-10-14 12:19:34 -07:00
|
|
|
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
2021-01-29 10:29:45 -08:00
|
|
|
/* Wa_1409804808:tgl,rkl,dg1[a0],adl-s */
|
2020-02-27 14:00:51 -08:00
|
|
|
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
|
|
|
|
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
|
2020-03-05 10:12:04 -08:00
|
|
|
|
2020-03-26 16:49:55 -07:00
|
|
|
/*
|
|
|
|
* Wa_1409085225:tgl
|
2021-01-29 10:29:45 -08:00
|
|
|
* Wa_14010229206:tgl,rkl,dg1[a0],adl-s
|
2020-03-26 16:49:55 -07:00
|
|
|
*/
|
|
|
|
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
|
2021-01-29 10:29:45 -08:00
|
|
|
}
|
|
|
|
|
2020-07-08 14:29:47 -07:00
|
|
|
|
2021-01-29 10:29:45 -08:00
|
|
|
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
|
|
|
|
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
2020-07-16 15:05:48 -07:00
|
|
|
/*
|
|
|
|
* Wa_1607030317:tgl
|
|
|
|
* Wa_1607186500:tgl
|
2020-10-14 12:19:34 -07:00
|
|
|
* Wa_1607297627:tgl,rkl,dg1[a0]
|
|
|
|
*
|
|
|
|
* On TGL and RKL there are multiple entries for this WA in the
|
|
|
|
* BSpec; some indicate this is an A0-only WA, others indicate
|
|
|
|
* it applies to all steppings so we trust the "all steppings."
|
|
|
|
* For DG1 this only applies to A0.
|
2020-07-16 15:05:48 -07:00
|
|
|
*/
|
|
|
|
wa_masked_en(wal,
|
|
|
|
GEN6_RC_SLEEP_PSMI_CONTROL,
|
|
|
|
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
|
|
|
|
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
|
2020-02-27 14:00:51 -08:00
|
|
|
}
|
|
|
|
|
2021-01-29 10:29:45 -08:00
|
|
|
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
|
|
|
/* Wa_1406941453:tgl,rkl,dg1 */
|
|
|
|
wa_masked_en(wal,
|
|
|
|
GEN10_SAMPLER_MODE,
|
|
|
|
ENABLE_SMALLPL);
|
|
|
|
}
|
|
|
|
|
2019-04-12 11:09:20 -07:00
|
|
|
if (IS_GEN(i915, 11)) {
|
2018-12-03 13:33:41 +00:00
|
|
|
/* This is not an Wa. Enable for better image quality */
|
|
|
|
wa_masked_en(wal,
|
|
|
|
_3D_CHICKEN3,
|
|
|
|
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
|
|
|
|
|
|
|
|
/* WaPipelineFlushCoherentLines:icl */
|
2019-07-17 19:06:23 +01:00
|
|
|
wa_write_or(wal,
|
|
|
|
GEN8_L3SQCREG4,
|
|
|
|
GEN8_LQSC_FLUSH_COHERENT_LINES);
|
2018-12-03 13:33:41 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Wa_1405543622:icl
|
|
|
|
* Formerly known as WaGAPZPriorityScheme
|
|
|
|
*/
|
|
|
|
wa_write_or(wal,
|
|
|
|
GEN8_GARBCNTL,
|
|
|
|
GEN11_ARBITRATION_PRIO_ORDER_MASK);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wa_1604223664:icl
|
|
|
|
* Formerly known as WaL3BankAddressHashing
|
|
|
|
*/
|
drm/i915/gt: rename wa_write_masked_or()
The use of "masked" in this function is due to its history. Once upon a
time it received a mask and a value as parameter. Since
commit eeec73f8a4a4 ("drm/i915/gt: Skip rmw for masked registers")
that is not true anymore and now there is a clear and a set parameter.
Depending on the case, that can still be thought as a mask and value,
but there are some subtle differences: what we clear doesn't need to be
the same bits we are setting, particularly when we are using masked
registers.
The fact that we also have "masked registers", i.e. registers whose mask
is stored in the upper 16 bits of the register, makes it even more
confusing, because "masked" in wa_write_masked_or() has little to do
with masked registers, but rather refers to the old mask parameter the
function received (that can also, but not exclusively, be used to write
to masked register).
Avoid the ambiguity and misnomer by renaming it to something else,
hopefully less confusing: wa_write_clr_set(), to designate that we are
doing both clr and set operations in the register.
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201209045246.2905675-2-lucas.demarchi@intel.com
2020-12-08 20:52:45 -08:00
|
|
|
wa_write_clr_set(wal,
|
|
|
|
GEN8_GARBCNTL,
|
|
|
|
GEN11_HASH_CTRL_EXCL_MASK,
|
|
|
|
GEN11_HASH_CTRL_EXCL_BIT0);
|
|
|
|
wa_write_clr_set(wal,
|
|
|
|
GEN11_GLBLINVL,
|
|
|
|
GEN11_BANK_HASH_ADDR_EXCL_MASK,
|
|
|
|
GEN11_BANK_HASH_ADDR_EXCL_BIT0);
|
2018-12-03 13:33:41 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Wa_1405733216:icl
|
|
|
|
* Formerly known as WaDisableCleanEvicts
|
|
|
|
*/
|
2019-07-17 19:06:23 +01:00
|
|
|
wa_write_or(wal,
|
|
|
|
GEN8_L3SQCREG4,
|
|
|
|
GEN11_LQSC_CLEAN_EVICT_DISABLE);
|
2018-12-03 13:33:41 +00:00
|
|
|
|
|
|
|
/* WaForwardProgressSoftReset:icl */
|
|
|
|
wa_write_or(wal,
|
|
|
|
GEN10_SCRATCH_LNCF2,
|
|
|
|
PMFLUSHDONE_LNICRSDROP |
|
|
|
|
PMFLUSH_GAPL3UNBLOCK |
|
|
|
|
PMFLUSHDONE_LNEBLK);
|
|
|
|
|
|
|
|
/* Wa_1406609255:icl (pre-prod) */
|
|
|
|
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
|
|
|
|
wa_write_or(wal,
|
|
|
|
GEN7_SARCHKMD,
|
2019-06-25 10:06:55 +01:00
|
|
|
GEN7_DISABLE_DEMAND_PREFETCH);
|
|
|
|
|
|
|
|
/* Wa_1606682166:icl */
|
|
|
|
wa_write_or(wal,
|
|
|
|
GEN7_SARCHKMD,
|
|
|
|
GEN7_DISABLE_SAMPLER_PREFETCH);
|
2019-07-17 19:06:24 +01:00
|
|
|
|
|
|
|
/* Wa_1409178092:icl */
|
drm/i915/gt: rename wa_write_masked_or()
The use of "masked" in this function is due to its history. Once upon a
time it received a mask and a value as parameter. Since
commit eeec73f8a4a4 ("drm/i915/gt: Skip rmw for masked registers")
that is not true anymore and now there is a clear and a set parameter.
Depending on the case, that can still be thought as a mask and value,
but there are some subtle differences: what we clear doesn't need to be
the same bits we are setting, particularly when we are using masked
registers.
The fact that we also have "masked registers", i.e. registers whose mask
is stored in the upper 16 bits of the register, makes it even more
confusing, because "masked" in wa_write_masked_or() has little to do
with masked registers, but rather refers to the old mask parameter the
function received (that can also, but not exclusively, be used to write
to masked register).
Avoid the ambiguity and misnomer by renaming it to something else,
hopefully less confusing: wa_write_clr_set(), to designate that we are
doing both clr and set operations in the register.
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201209045246.2905675-2-lucas.demarchi@intel.com
2020-12-08 20:52:45 -08:00
|
|
|
wa_write_clr_set(wal,
|
|
|
|
GEN11_SCRATCH2,
|
|
|
|
GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
|
|
|
|
0);
|
2020-03-02 15:14:20 -08:00
|
|
|
|
|
|
|
/* WaEnable32PlaneMode:icl */
|
|
|
|
wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
|
|
|
|
GEN11_ENABLE_32_PLANE_MODE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wa_1408615072:icl,ehl (vsunit)
|
|
|
|
* Wa_1407596294:icl,ehl (hsunit)
|
|
|
|
*/
|
2020-03-06 09:11:39 -08:00
|
|
|
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
|
|
|
|
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
|
2020-03-02 15:14:20 -08:00
|
|
|
|
|
|
|
/* Wa_1407352427:icl,ehl */
|
2020-03-06 09:11:39 -08:00
|
|
|
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
|
|
|
|
PSDUNIT_CLKGATE_DIS);
|
2020-03-11 09:22:59 -07:00
|
|
|
|
|
|
|
/* Wa_1406680159:icl,ehl */
|
|
|
|
wa_write_or(wal,
|
|
|
|
SUBSLICE_UNIT_LEVEL_CLKGATE,
|
|
|
|
GWUNIT_CLKGATE_DIS);
|
2020-03-11 09:23:00 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Wa_1408767742:icl[a2..forever],ehl[all]
|
|
|
|
* Wa_1605460711:icl[a0..c0]
|
|
|
|
*/
|
|
|
|
wa_write_or(wal,
|
|
|
|
GEN7_FF_THREAD_MODE,
|
|
|
|
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
|
2020-05-19 09:25:34 -07:00
|
|
|
|
|
|
|
/* Wa_22010271021:ehl */
|
2020-10-14 00:59:48 +05:30
|
|
|
if (IS_JSL_EHL(i915))
|
2020-05-19 09:25:34 -07:00
|
|
|
wa_masked_en(wal,
|
|
|
|
GEN9_CS_DEBUG_MODE1,
|
|
|
|
FF_DOP_CLOCK_GATE_DISABLE);
|
2018-12-03 13:33:41 +00:00
|
|
|
}
|
|
|
|
|
2020-03-04 15:31:44 +00:00
|
|
|
if (IS_GEN_RANGE(i915, 9, 12)) {
|
|
|
|
/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
|
2018-12-03 13:33:41 +00:00
|
|
|
wa_masked_en(wal,
|
|
|
|
GEN7_FF_SLICE_CS_CHICKEN1,
|
|
|
|
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
|
|
|
|
}
|
|
|
|
|
2020-06-02 15:05:40 +01:00
|
|
|
if (IS_SKYLAKE(i915) ||
|
|
|
|
IS_KABYLAKE(i915) ||
|
|
|
|
IS_COFFEELAKE(i915) ||
|
|
|
|
IS_COMETLAKE(i915)) {
|
2018-12-03 13:33:41 +00:00
|
|
|
/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
|
|
|
|
wa_write_or(wal,
|
|
|
|
GEN8_GARBCNTL,
|
|
|
|
GEN9_GAPS_TSV_CREDIT_DISABLE);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_BROXTON(i915)) {
|
|
|
|
/* WaDisablePooledEuLoadBalancingFix:bxt */
|
|
|
|
wa_masked_en(wal,
|
|
|
|
FF_SLICE_CS_CHICKEN2,
|
|
|
|
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
|
|
|
|
}
|
|
|
|
|
drm/i915: replace IS_GEN<N> with IS_GEN(..., N)
Define IS_GEN() similarly to our IS_GEN_RANGE(). but use gen instead of
gen_mask to do the comparison. Now callers can pass then gen as a parameter,
so we don't require one macro for each gen.
The following spatch was used to convert the users of these macros:
@@
expression e;
@@
(
- IS_GEN2(e)
+ IS_GEN(e, 2)
|
- IS_GEN3(e)
+ IS_GEN(e, 3)
|
- IS_GEN4(e)
+ IS_GEN(e, 4)
|
- IS_GEN5(e)
+ IS_GEN(e, 5)
|
- IS_GEN6(e)
+ IS_GEN(e, 6)
|
- IS_GEN7(e)
+ IS_GEN(e, 7)
|
- IS_GEN8(e)
+ IS_GEN(e, 8)
|
- IS_GEN9(e)
+ IS_GEN(e, 9)
|
- IS_GEN10(e)
+ IS_GEN(e, 10)
|
- IS_GEN11(e)
+ IS_GEN(e, 11)
)
v2: use IS_GEN rather than GT_GEN and compare to info.gen rather than
using the bitmask
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181212181044.15886-2-lucas.demarchi@intel.com
2018-12-12 10:10:43 -08:00
|
|
|
if (IS_GEN(i915, 9)) {
|
2018-12-03 13:33:41 +00:00
|
|
|
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
|
|
|
|
wa_masked_en(wal,
|
|
|
|
GEN9_CSFE_CHICKEN1_RCS,
|
|
|
|
GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
|
|
|
|
|
|
|
|
/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
|
|
|
|
wa_write_or(wal,
|
|
|
|
BDW_SCRATCH1,
|
|
|
|
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
|
|
|
|
|
|
|
|
/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
|
|
|
|
if (IS_GEN9_LP(i915))
|
drm/i915/gt: rename wa_write_masked_or()
The use of "masked" in this function is due to its history. Once upon a
time it received a mask and a value as parameter. Since
commit eeec73f8a4a4 ("drm/i915/gt: Skip rmw for masked registers")
that is not true anymore and now there is a clear and a set parameter.
Depending on the case, that can still be thought as a mask and value,
but there are some subtle differences: what we clear doesn't need to be
the same bits we are setting, particularly when we are using masked
registers.
The fact that we also have "masked registers", i.e. registers whose mask
is stored in the upper 16 bits of the register, makes it even more
confusing, because "masked" in wa_write_masked_or() has little to do
with masked registers, but rather refers to the old mask parameter the
function received (that can also, but not exclusively, be used to write
to masked register).
Avoid the ambiguity and misnomer by renaming it to something else,
hopefully less confusing: wa_write_clr_set(), to designate that we are
doing both clr and set operations in the register.
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201209045246.2905675-2-lucas.demarchi@intel.com
2020-12-08 20:52:45 -08:00
|
|
|
wa_write_clr_set(wal,
|
|
|
|
GEN8_L3SQCREG1,
|
|
|
|
L3_PRIO_CREDITS_MASK,
|
|
|
|
L3_GENERAL_PRIO_CREDITS(62) |
|
|
|
|
L3_HIGH_PRIO_CREDITS(2));
|
2018-12-03 13:33:41 +00:00
|
|
|
|
|
|
|
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
|
|
|
|
wa_write_or(wal,
|
|
|
|
GEN8_L3SQCREG4,
|
|
|
|
GEN8_LQSC_FLUSH_COHERENT_LINES);
|
|
|
|
}
|
2020-02-01 19:40:04 +00:00
|
|
|
|
2021-01-04 11:49:14 +00:00
|
|
|
if (IS_HASWELL(i915)) {
|
|
|
|
/* WaSampleCChickenBitEnable:hsw */
|
|
|
|
wa_masked_en(wal,
|
|
|
|
HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
|
|
|
|
|
|
|
|
wa_masked_dis(wal,
|
|
|
|
CACHE_MODE_0_GEN7,
|
|
|
|
/* enable HiZ Raw Stall Optimization */
|
|
|
|
HIZ_RAW_STALL_OPT_DISABLE);
|
|
|
|
|
|
|
|
/* WaDisable4x2SubspanOptimization:hsw */
|
|
|
|
wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
|
2021-01-13 22:51:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_VALLEYVIEW(i915)) {
|
|
|
|
/* WaDisableEarlyCull:vlv */
|
|
|
|
wa_masked_en(wal,
|
|
|
|
_3D_CHICKEN3,
|
|
|
|
_3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
|
2021-01-04 11:49:14 +00:00
|
|
|
|
|
|
|
/*
|
2021-01-13 22:51:44 +00:00
|
|
|
* WaVSThreadDispatchOverride:ivb,vlv
|
2021-01-04 11:49:14 +00:00
|
|
|
*
|
2021-01-13 22:51:44 +00:00
|
|
|
* This actually overrides the dispatch
|
|
|
|
* mode for all thread types.
|
2021-01-04 11:49:14 +00:00
|
|
|
*/
|
2021-01-13 22:51:44 +00:00
|
|
|
wa_write_clr_set(wal,
|
|
|
|
GEN7_FF_THREAD_MODE,
|
|
|
|
GEN7_FF_SCHED_MASK,
|
|
|
|
GEN7_FF_TS_SCHED_HW |
|
|
|
|
GEN7_FF_VS_SCHED_HW |
|
|
|
|
GEN7_FF_DS_SCHED_HW);
|
|
|
|
|
|
|
|
/* WaPsdDispatchEnable:vlv */
|
|
|
|
/* WaDisablePSDDualDispatchEnable:vlv */
|
|
|
|
wa_masked_en(wal,
|
|
|
|
GEN7_HALF_SLICE_CHICKEN1,
|
|
|
|
GEN7_MAX_PS_THREAD_DEP |
|
|
|
|
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
|
2021-01-04 11:49:14 +00:00
|
|
|
}
|
|
|
|
|
2021-01-13 22:51:44 +00:00
|
|
|
if (IS_IVYBRIDGE(i915)) {
|
|
|
|
/* WaDisableEarlyCull:ivb */
|
2021-01-13 22:51:43 +00:00
|
|
|
wa_masked_en(wal,
|
|
|
|
_3D_CHICKEN3,
|
|
|
|
_3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
|
|
|
|
|
2021-01-13 22:51:44 +00:00
|
|
|
if (0) { /* causes HiZ corruption on ivb:gt1 */
|
|
|
|
/* enable HiZ Raw Stall Optimization */
|
|
|
|
wa_masked_dis(wal,
|
|
|
|
CACHE_MODE_0_GEN7,
|
|
|
|
HIZ_RAW_STALL_OPT_DISABLE);
|
|
|
|
}
|
|
|
|
|
2021-01-13 22:51:43 +00:00
|
|
|
/*
|
|
|
|
* WaVSThreadDispatchOverride:ivb,vlv
|
|
|
|
*
|
|
|
|
* This actually overrides the dispatch
|
|
|
|
* mode for all thread types.
|
|
|
|
*/
|
|
|
|
wa_write_clr_set(wal,
|
|
|
|
GEN7_FF_THREAD_MODE,
|
|
|
|
GEN7_FF_SCHED_MASK,
|
|
|
|
GEN7_FF_TS_SCHED_HW |
|
|
|
|
GEN7_FF_VS_SCHED_HW |
|
|
|
|
GEN7_FF_DS_SCHED_HW);
|
|
|
|
|
2021-01-13 22:51:44 +00:00
|
|
|
/* WaDisablePSDDualDispatchEnable:ivb */
|
|
|
|
if (IS_IVB_GT1(i915))
|
|
|
|
wa_masked_en(wal,
|
|
|
|
GEN7_HALF_SLICE_CHICKEN1,
|
|
|
|
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_GEN(i915, 7)) {
|
|
|
|
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
|
|
|
|
wa_masked_en(wal,
|
|
|
|
GFX_MODE_GEN7,
|
|
|
|
GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
|
|
|
|
|
|
|
|
/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
|
2021-01-13 22:51:43 +00:00
|
|
|
wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* BSpec says this must be set, even though
|
2021-01-13 22:51:44 +00:00
|
|
|
* WaDisable4x2SubspanOptimization:ivb,hsw
|
2021-01-13 22:51:43 +00:00
|
|
|
* WaDisable4x2SubspanOptimization isn't listed for VLV.
|
|
|
|
*/
|
|
|
|
wa_masked_en(wal,
|
|
|
|
CACHE_MODE_1,
|
|
|
|
PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* BSpec recommends 8x4 when MSAA is used,
|
|
|
|
* however in practice 16x4 seems fastest.
|
|
|
|
*
|
|
|
|
* Note that PS/WM thread counts depend on the WIZ hashing
|
|
|
|
* disable bit, which we don't touch here, but it's good
|
|
|
|
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
|
|
|
|
*/
|
|
|
|
wa_add(wal, GEN7_GT_MODE, 0,
|
|
|
|
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK,
|
|
|
|
GEN6_WIZ_HASHING_16x4),
|
|
|
|
GEN6_WIZ_HASHING_16x4);
|
|
|
|
}
|
|
|
|
|
2020-02-01 19:40:04 +00:00
|
|
|
if (IS_GEN_RANGE(i915, 6, 7))
|
|
|
|
/*
|
|
|
|
* We need to disable the AsyncFlip performance optimisations in
|
|
|
|
* order to use MI_WAIT_FOR_EVENT within the CS. It should
|
|
|
|
* already be programmed to '1' on all products.
|
|
|
|
*
|
|
|
|
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
|
|
|
|
*/
|
|
|
|
wa_masked_en(wal,
|
|
|
|
MI_MODE,
|
|
|
|
ASYNC_FLIP_PERF_DISABLE);
|
|
|
|
|
|
|
|
if (IS_GEN(i915, 6)) {
|
|
|
|
/*
|
|
|
|
* Required for the hardware to program scanline values for
|
|
|
|
* waiting
|
|
|
|
* WaEnableFlushTlbInvalidationMode:snb
|
|
|
|
*/
|
|
|
|
wa_masked_en(wal,
|
|
|
|
GFX_MODE,
|
|
|
|
GFX_TLB_INVALIDATE_EXPLICIT);
|
|
|
|
|
2021-01-04 11:49:13 +00:00
|
|
|
/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
|
|
|
|
wa_masked_en(wal,
|
|
|
|
_3D_CHICKEN,
|
|
|
|
_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
|
|
|
|
|
|
|
|
wa_masked_en(wal,
|
|
|
|
_3D_CHICKEN3,
|
|
|
|
/* WaStripsFansDisableFastClipPerformanceFix:snb */
|
|
|
|
_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
|
|
|
|
/*
|
|
|
|
* Bspec says:
|
|
|
|
* "This bit must be set if 3DSTATE_CLIP clip mode is set
|
|
|
|
* to normal and 3DSTATE_SF number of SF output attributes
|
|
|
|
* is more than 16."
|
|
|
|
*/
|
|
|
|
_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* BSpec recommends 8x4 when MSAA is used,
|
|
|
|
* however in practice 16x4 seems fastest.
|
|
|
|
*
|
|
|
|
* Note that PS/WM thread counts depend on the WIZ hashing
|
|
|
|
* disable bit, which we don't touch here, but it's good
|
|
|
|
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
|
|
|
|
*/
|
|
|
|
wa_add(wal,
|
|
|
|
GEN6_GT_MODE, 0,
|
|
|
|
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
|
|
|
|
GEN6_WIZ_HASHING_16x4);
|
|
|
|
|
|
|
|
/* WaDisable_RenderCache_OperationalFlush:snb */
|
|
|
|
wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
|
|
|
|
|
2020-02-01 19:40:04 +00:00
|
|
|
/*
|
|
|
|
* From the Sandybridge PRM, volume 1 part 3, page 24:
|
|
|
|
* "If this bit is set, STCunit will have LRA as replacement
|
|
|
|
* policy. [...] This bit must be reset. LRA replacement
|
|
|
|
* policy is not supported."
|
|
|
|
*/
|
|
|
|
wa_masked_dis(wal,
|
|
|
|
CACHE_MODE_0,
|
|
|
|
CM0_STC_EVICT_DISABLE_LRA_SNB);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_GEN_RANGE(i915, 4, 6))
|
|
|
|
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
|
|
|
|
wa_add(wal, MI_MODE,
|
|
|
|
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
|
|
|
|
/* XXX bit doesn't stick on Broadwater */
|
|
|
|
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
|
2020-06-01 08:24:13 +01:00
|
|
|
|
|
|
|
if (IS_GEN(i915, 4))
|
|
|
|
/*
|
|
|
|
* Disable CONSTANT_BUFFER before it is loaded from the context
|
|
|
|
* image. For as it is loaded, it is executed and the stored
|
|
|
|
* address may no longer be valid, leading to a GPU hang.
|
|
|
|
*
|
|
|
|
* This imposes the requirement that userspace reload their
|
|
|
|
* CONSTANT_BUFFER on every batch, fortunately a requirement
|
|
|
|
* they are already accustomed to from before contexts were
|
|
|
|
* enabled.
|
|
|
|
*/
|
|
|
|
wa_add(wal, ECOSKPD,
|
|
|
|
0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
|
|
|
|
0 /* XXX bit doesn't stick on Broadwater */);
|
2018-12-03 13:33:41 +00:00
|
|
|
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
static void
|
|
|
|
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
2018-12-03 13:33:41 +00:00
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = engine->i915;
|
|
|
|
|
|
|
|
/* WaKBLVECSSemaphoreWaitPoll:kbl */
|
2020-08-10 20:21:05 -07:00
|
|
|
if (IS_KBL_GT_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
|
2018-12-03 13:33:41 +00:00
|
|
|
wa_write(wal,
|
|
|
|
RING_SEMA_WAIT_POLL(engine->mmio_base),
|
|
|
|
1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-09 17:32:31 -08:00
|
|
|
static void
|
|
|
|
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|
|
|
{
|
2020-02-01 19:40:04 +00:00
|
|
|
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
|
2019-01-09 17:32:31 -08:00
|
|
|
return;
|
|
|
|
|
2019-07-05 13:43:24 +01:00
|
|
|
if (engine->class == RENDER_CLASS)
|
2019-01-09 17:32:31 -08:00
|
|
|
rcs_engine_wa_init(engine, wal);
|
|
|
|
else
|
|
|
|
xcs_engine_wa_init(engine, wal);
|
|
|
|
}
|
|
|
|
|
2018-12-03 13:33:41 +00:00
|
|
|
void intel_engine_init_workarounds(struct intel_engine_cs *engine)
|
|
|
|
{
|
|
|
|
struct i915_wa_list *wal = &engine->wa_list;
|
|
|
|
|
2020-02-01 19:40:04 +00:00
|
|
|
if (INTEL_GEN(engine->i915) < 4)
|
2018-12-03 13:33:41 +00:00
|
|
|
return;
|
|
|
|
|
2019-07-12 00:07:45 -07:00
|
|
|
wa_init_start(wal, "engine", engine->name);
|
2019-01-09 17:32:31 -08:00
|
|
|
engine_init_workarounds(engine, wal);
|
2018-12-03 13:33:41 +00:00
|
|
|
wa_init_finish(wal);
|
|
|
|
}
|
|
|
|
|
|
|
|
void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
|
|
|
|
{
|
2019-04-12 21:24:57 +01:00
|
|
|
wa_list_apply(engine->uncore, &engine->wa_list);
|
2018-12-03 13:33:41 +00:00
|
|
|
}
|
|
|
|
|
2020-10-09 12:44:42 -07:00
|
|
|
struct mcr_range {
|
2020-03-11 09:22:55 -07:00
|
|
|
u32 start;
|
|
|
|
u32 end;
|
2020-10-09 12:44:42 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Register ranges steered by the MCR selector on gen8+; zero-terminated. */
static const struct mcr_range mcr_ranges_gen8[] = {
	{ 0x5500, 0x55ff },
	{ 0x7000, 0x7fff },
	{ 0x9400, 0x97ff },
	{ 0xb000, 0xb3ff },
	{ 0xe000, 0xe7ff },
	{},
};
|
|
|
|
|
2020-10-09 12:44:42 -07:00
|
|
|
/* Register ranges steered by the MCR selector on gen12+; zero-terminated. */
static const struct mcr_range mcr_ranges_gen12[] = {
	{ 0x8150, 0x815f },
	{ 0x9520, 0x955f },
	{ 0xb100, 0xb3ff },
	{ 0xde80, 0xe8ff },
	{ 0x24a00, 0x24a7f },
	{},
};
|
|
|
|
|
2019-07-17 19:06:22 +01:00
|
|
|
/*
 * Return true when @offset lies in a multicast (MCR) register range for
 * this platform, i.e. a range we cannot verify via CS reads.
 */
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	const struct mcr_range *r;

	if (INTEL_GEN(i915) >= 12)
		r = mcr_ranges_gen12;
	else if (INTEL_GEN(i915) >= 8)
		r = mcr_ranges_gen8;
	else
		return false;

	/*
	 * Registers in these ranges are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	for (; r->start; r++) {
		if (offset >= r->start && offset <= r->end)
			return true;
	}

	return false;
}
|
|
|
|
|
2019-04-17 08:56:28 +01:00
|
|
|
/*
 * Emit MI_STORE_REGISTER_MEM commands into @rq that snapshot every
 * verifiable workaround register in @wal into the scratch buffer @vma.
 *
 * Registers inside an MCR range are skipped entirely (their CS reads are
 * not steered, so the result would be meaningless), but each register's
 * result slot in the buffer is still addressed by its list index @i, so
 * skipped registers simply leave holes in the buffer.
 *
 * Returns 0 on success or a negative errno from intel_ring_begin().
 */
static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	/* gen8+ uses the longer (64b address) form of the SRM command. */
	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(i915) >= 8)
		srm++;

	/* First pass: count how many SRM packets we will actually emit. */
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	/* Each SRM packet is 4 dwords: cmd, reg offset, address, 0. */
	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		/* Unverifiable via CS; leave its slot in the buffer unwritten. */
		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		/* Slot indexed by list position @i, not by emitted count. */
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}
|
|
|
|
|
2019-05-20 15:25:46 +01:00
|
|
|
/*
 * Verify that the workarounds in @wal are still applied on the hardware by
 * reading the registers back from the GPU itself (via SRM into a scratch
 * buffer) and comparing against the expected values.
 *
 * @ce:   context used to submit the readback request
 * @wal:  workaround list to check
 * @from: caller description, used in the wa_verify() diagnostics
 *
 * Returns 0 if all verifiable registers match, -ENXIO if any mismatch,
 * -ETIME if the readback request did not complete, or another negative
 * errno on setup failure.
 */
static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	/* One u32 result slot per workaround entry. */
	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
					   wal->count * sizeof(u32));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	i915_gem_ww_ctx_init(&ww, false);
retry:
	/* ww transaction: lock the scratch object, then pin the context. */
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err == 0)
		err = wa_list_srm(rq, wal, vma);

	/*
	 * The request is submitted even on error (marked with the error so
	 * it is skipped), keeping the request lifecycle well-formed; we hold
	 * an extra reference for the wait/cleanup below.
	 */
	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);

	if (err)
		goto err_rq;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	/* Compare each readback slot; MCR-steered registers were not read. */
	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	/* On ww contention, back off and restart the whole transaction. */
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
	return err;
}
|
|
|
|
|
|
|
|
int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
|
|
|
|
const char *from)
|
|
|
|
{
|
2019-05-20 15:25:46 +01:00
|
|
|
return engine_wa_list_verify(engine->kernel_context,
|
|
|
|
&engine->wa_list,
|
|
|
|
from);
|
2019-04-17 08:56:28 +01:00
|
|
|
}
|
|
|
|
|
2018-04-14 13:27:54 +01:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
2019-04-24 18:48:39 +01:00
|
|
|
#include "selftest_workarounds.c"
|
2018-04-14 13:27:54 +01:00
|
|
|
#endif
|