// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <asm/set_memory.h>
#include <asm/smp.h>
#include <linux/types.h>
#include <linux/stop_machine.h>

#include <drm/drm_managed.h>
#include <drm/intel/i915_drm.h>
#include <drm/intel/intel-gtt.h>

#include "gem/i915_gem_lmem.h"

#include "intel_context.h"
#include "intel_ggtt_gmch.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_pci_config.h"
#include "intel_ring.h"
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"
#include "gen8_ppgtt.h"
#include "intel_engine_pm.h"

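/*
 * drm_mm color callback, installed below for machines without LLC and
 * without PPGTT: nudge the start/end of a node so that a scratch page
 * always separates ranges of different cache coloring.
 */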
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

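/* One-time SW state setup: address space, WC iomap of the aperture, fences. */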
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(to_gt(i915)->ggtt);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
 * @vm: The VM to suspend the mappings for
 * @evict_all: Evict all VMAs
 *
 * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
 * DPT page table.
 */
void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all)
{
	struct i915_vma *vma, *vn;
	int save_skip_rewrite;

	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

retry:
	i915_gem_drain_freed_objects(vm->i915);

	mutex_lock(&vm->mutex);

	/*
	 * Skip rewriting PTE on VMA unbind.
	 * FIXME: Use an argument to i915_vma_unbind() instead?
	 */
	save_skip_rewrite = vm->skip_pte_rewrite;
	vm->skip_pte_rewrite = true;

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

		if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
			continue;

		/* unlikely to race when GPU is idle, so no worry about slowpath... */
		if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
			/*
			 * No dead objects should appear here, GPU should be
			 * completely idle, and userspace suspended
			 */
			i915_gem_object_get(obj);

			mutex_unlock(&vm->mutex);

			i915_gem_object_lock(obj, NULL);
			GEM_WARN_ON(i915_vma_unbind(vma));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			vm->skip_pte_rewrite = save_skip_rewrite;
			goto retry;
		}

		if (evict_all || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			i915_vma_wait_for_bind(vma);

			__i915_vma_evict(vma, false);
			drm_mm_remove_node(&vma->node);
		}

		i915_gem_object_unlock(obj);
	}

	vm->clear_range(vm, 0, vm->total);

	vm->skip_pte_rewrite = save_skip_rewrite;

	mutex_unlock(&vm->mutex);

	drm_WARN_ON(&vm->i915->drm, evict_all && !list_empty(&vm->bound_list));
}

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	struct intel_gt *gt;

	i915_ggtt_suspend_vm(&ggtt->vm, false);
	ggtt->invalidate(ggtt);

	list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
		intel_gt_check_and_clear_faults(gt);
}

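/*
 * Flush in-flight GGTT writes and invalidate the TLB: the posted write to
 * GFX_FLSH_CNTL_GEN6 is serialised by reading the register back.
 */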
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915)
{
	/*
	 * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
		return true;

	return false;
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 *
	 * Only perform this when GGTT is mapped as WC, see ggtt_probe_common().
	 */
	if (needs_wc_ggtt_mapping(ggtt->vm.i915))
		intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
				      GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_active(uncore->rpm, wakeref)
		intel_guc_invalidate_tlb_guc(gt_to_guc(gt));
}

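/*
 * In addition to the HW TLB flush in gen8_ggtt_invalidate(), also tell
 * each GT's GuC to drop its TLB entries, using the CT interface when
 * available and the legacy invalidation registers otherwise.
 */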
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct intel_gt *gt;

	gen8_ggtt_invalidate(ggtt);

	list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
		if (intel_guc_tlb_invalidation_is_available(gt_to_guc(gt)))
			guc_ggtt_ct_invalidate(gt);
		else if (GRAPHICS_VER(i915) >= 12)
			intel_uncore_write_fw(gt->uncore,
					      GEN12_GUC_TLB_INV_CR,
					      GEN12_GUC_TLB_INV_CR_INVALIDATE);
		else
			intel_uncore_write_fw(gt->uncore,
					      GEN8_GTCR, GEN8_GTCR_INVALIDATE);
	}
}

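/* MTL GGTT PTEs carry the caching policy as a PAT index in two PTE bits. */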
static u64 mtl_ggtt_pte_encode(dma_addr_t addr,
			       unsigned int pat_index,
			       u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;

	WARN_ON_ONCE(addr & ~GEN12_GGTT_PTE_ADDR_MASK);

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	if (pat_index & BIT(0))
		pte |= MTL_GGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= MTL_GGTT_PTE_PAT1;

	return pte;
}

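/* gen8 GGTT PTEs only need the present bit plus, for LMEM placements, PTE_LM. */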
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
			 unsigned int pat_index,
			 u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	return pte;
}

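/* Inverse of gen8_ggtt_pte_encode(): split a PTE back into address and flags. */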
static dma_addr_t gen8_ggtt_pte_decode(u64 pte, bool *is_present, bool *is_local)
{
	*is_present = pte & GEN8_PAGE_PRESENT;
	*is_local = pte & GEN12_GGTT_PTE_LM;

	return pte & GEN12_GGTT_PTE_ADDR_MASK;
}

static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
{
	struct intel_gt *gt = ggtt->vm.gt;

	return intel_gt_is_bind_context_ready(gt);
}

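/*
 * Grab the blitter's bind context for a MI_UPDATE_GTT based GGTT update,
 * taking a GT wakeref only if the GT is already awake; returns NULL (and
 * thus falls back to the MMIO path) when the GT is wedged or asleep.
 */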
static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt, intel_wakeref_t *wakeref)
{
	struct intel_context *ce;
	struct intel_gt *gt = ggtt->vm.gt;

	if (intel_gt_is_wedged(gt))
		return NULL;

	ce = gt->engine[BCS0]->bind_context;
	GEM_BUG_ON(!ce);

	/*
	 * If the GT is not awake already at this stage then fallback
	 * to pci based GGTT update otherwise __intel_wakeref_get_first()
	 * would conflict with fs_reclaim trying to allocate memory while
	 * doing rpm_resume().
	 */
	*wakeref = intel_gt_pm_get_if_awake(gt);
	if (!*wakeref)
		return NULL;

	intel_engine_pm_get(ce->engine);

	return ce;
}

static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref)
{
	intel_engine_pm_put(ce->engine);
	intel_gt_pm_put(ce->engine->gt, wakeref);
}

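/*
 * Update @num_entries GGTT PTEs starting at @offset by submitting
 * MI_UPDATE_GTT batches on the bind context. With @pages, the PTEs point
 * at the backing store (padding any remainder with scratch); without,
 * every PTE is set to @pte. Returns false if the update could not be
 * submitted and the caller must fall back to the MMIO path.
 */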
static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
				struct sg_table *pages, u32 num_entries,
				const gen8_pte_t pte)
{
	struct i915_sched_attr attr = {};
	struct intel_gt *gt = ggtt->vm.gt;
	const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode;
	struct sgt_iter iter;
	struct i915_request *rq;
	struct intel_context *ce;
	intel_wakeref_t wakeref;
	u32 *cs;

	if (!num_entries)
		return true;

	ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref);
	if (!ce)
		return false;

	if (pages)
		iter = __sgt_iter(pages->sgl, true);

	while (num_entries) {
		int count = 0;
		dma_addr_t addr;
		/*
		 * MI_UPDATE_GTT can update 512 entries in a single command,
		 * but that ends up with an engine reset; 511 works.
		 */
		u32 n_ptes = min_t(u32, 511, num_entries);

		if (mutex_lock_interruptible(&ce->timeline->mutex))
			goto put_ce;

		intel_context_enter(ce);
		rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC);
		intel_context_exit(ce);
		if (IS_ERR(rq)) {
			GT_TRACE(gt, "Failed to get bind request\n");
			mutex_unlock(&ce->timeline->mutex);
			goto put_ce;
		}

		cs = intel_ring_begin(rq, 2 * n_ptes + 2);
		if (IS_ERR(cs)) {
			GT_TRACE(gt, "Failed to get ring space for GGTT bind\n");
			i915_request_set_error_once(rq, PTR_ERR(cs));
			/* once a request is created, it must be queued */
			goto queue_err_rq;
		}

		*cs++ = MI_UPDATE_GTT | (2 * n_ptes);
		*cs++ = offset << 12;

		if (pages) {
			for_each_sgt_daddr_next(addr, iter) {
				if (count == n_ptes)
					break;
				*cs++ = lower_32_bits(pte | addr);
				*cs++ = upper_32_bits(pte | addr);
				count++;
			}
			/* fill remaining with scratch pte, if any */
			if (count < n_ptes) {
				memset64((u64 *)cs, scratch_pte,
					 n_ptes - count);
				cs += (n_ptes - count) * 2;
			}
		} else {
			memset64((u64 *)cs, pte, n_ptes);
			cs += n_ptes * 2;
		}

		intel_ring_advance(rq, cs);
queue_err_rq:
		i915_request_get(rq);
		__i915_request_commit(rq);
		__i915_request_queue(rq, &attr);

		mutex_unlock(&ce->timeline->mutex);
		/* This will break if the request is complete or after engine reset */
		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		if (rq->fence.error)
			goto err_rq;

		i915_request_put(rq);

		num_entries -= n_ptes;
		offset += n_ptes;
	}

	gen8_ggtt_bind_put_ce(ce, wakeref);
	return true;

err_rq:
	i915_request_put(rq);
put_ce:
	gen8_ggtt_bind_put_ce(ce, wakeref);
	return false;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static gen8_pte_t gen8_get_pte(void __iomem *addr)
{
	return readq(addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  unsigned int pat_index,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags));

	ggtt->invalidate(ggtt);
}

static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm,
				       u64 offset, bool *is_present, bool *is_local)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local);
}

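/*
 * Single page variant of the _bind path: try MI_UPDATE_GTT first and
 * fall back to the MMIO based gen8_ggtt_insert_page() if that fails.
 */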
static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
				       dma_addr_t addr, u64 offset,
				       unsigned int pat_index, u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t pte;

	pte = ggtt->vm.pte_encode(addr, pat_index, flags);
	if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
	    gen8_ggtt_bind_ptes(ggtt, offset, NULL, 1, pte))
		return ggtt->invalidate(ggtt);

	gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     unsigned int pat_index,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
	end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);
	end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

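/*
 * Mirror of gen8_ggtt_insert_entries() on top of gen8_ggtt_bind_ptes():
 * scratch PTEs for the leading guard, then the object's pages, then
 * scratch again up to the end of the trailing guard.
 */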
static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
					    struct i915_vma_resource *vma_res,
					    unsigned int pat_index, u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t pte_encode;
	u64 start, end;

	pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
	start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
	end = start + vma_res->guard / I915_GTT_PAGE_SIZE;
	if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
		goto err;

	start = end;
	end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
	if (!gen8_ggtt_bind_ptes(ggtt, start, vma_res->bi.pages,
				 vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode))
		goto err;

	start += vma_res->node_size / I915_GTT_PAGE_SIZE;
	if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
		goto err;

	return true;

err:
	return false;
}

static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
					  struct i915_vma_resource *vma_res,
					  unsigned int pat_index, u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
	    __gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags))
		return ggtt->invalidate(ggtt);

	gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags);
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

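/*
 * Scratch-fill a range via MI_UPDATE_GTT when the bind context is ready,
 * otherwise fall back to the MMIO based gen8_ggtt_clear_range().
 */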
static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm,
					 u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	if (should_update_ggtt_with_bind(ggtt) && gen8_ggtt_bind_ptes(ggtt, first_entry,
	    NULL, num_entries, scratch_pte))
		return ggtt->invalidate(ggtt);

	gen8_ggtt_clear_range(vm, start, length);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
|
|
|
|
dma_addr_t addr,
|
|
|
|
u64 offset,
|
drm/i915: use pat_index instead of cache_level
Currently the KMD is using enum i915_cache_level to set caching policy for
buffer objects. This is flaky because the PAT index which really controls
the caching behavior in PTE has far more levels than what's defined in the
enum. In addition, the PAT index is platform dependent, having to translate
between i915_cache_level and PAT index is not reliable, and makes the code
more complicated.
From UMD's perspective there is also a necessity to set caching policy for
performance fine tuning. It's much easier for the UMD to directly use PAT
index because the behavior of each PAT index is clearly defined in Bspec.
Having the abstracted i915_cache_level sitting in between would only cause
more ambiguity. PAT is expected to work much like MOCS already works today,
and by design userspace is expected to select the index that exactly
matches the desired behavior described in the hardware specification.
For these reasons this patch replaces i915_cache_level with PAT index. Also
note, the cache_level is not completely removed yet, because the KMD still
has the need of creating buffer objects with simple cache settings such as
cached, uncached, or writethrough. For kernel objects, cache_level is used
for simplicity and backward compatibility. For Pre-gen12 platforms PAT can
have 1:1 mapping to i915_cache_level, so these two are interchangeable. see
the use of LEGACY_CACHELEVEL.
One consequence of this change is that gen8_pte_encode is no longer working
for gen12 platforms due to the fact that gen12 platforms has different PAT
definitions. In the meantime the mtl_pte_encode introduced specfically for
MTL becomes generic for all gen12 platforms. This patch renames the MTL
PTE encode function into gen12_pte_encode and apply it to all gen12. Even
though this change looks unrelated, but separating them would temporarily
break gen12 PTE encoding, thus squash them in one patch.
Special note: this patch changes the way caching behavior is controlled in
the sense that some objects are left to be managed by userspace. For such
objects we need to be careful not to change the userspace settings.There
are kerneldoc and comments added around obj->cache_coherent, cache_dirty,
and how to bypass the checkings by i915_gem_object_has_cache_level. For
full understanding, these changes need to be looked at together with the
two follow-up patches, one disables the {set|get}_caching ioctl's and the
other adds set_pat extension to the GEM_CREATE uAPI.
Bspec: 63019
Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Fei Yang <fei.yang@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230509165200.1740-3-fei.yang@intel.com
2023-05-09 09:52:00 -07:00
|
|
|
unsigned int pat_index,
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 flags)
|
|
|
|
{
|
|
|
|
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
|
|
|
|
gen6_pte_t __iomem *pte =
|
|
|
|
(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
|
|
|
|
|
2023-05-09 09:52:00 -07:00
|
|
|
iowrite32(vm->pte_encode(addr, pat_index, flags), pte);
|
2022-06-17 16:05:59 -07:00
|
|
|
|
|
|
|
ggtt->invalidate(ggtt);
|
|
|
|
}
|
|
|
|
|
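A minimal, self-contained sketch (not driver code) of the index arithmetic used
above: a GGTT byte offset selects one PTE slot per page, assuming
I915_GTT_PAGE_SIZE is 4096. GTT_PAGE_SIZE and pte_slot are illustrative names.
#include <assert.h>
#include <stdint.h>

#define GTT_PAGE_SIZE 4096ull	/* assumed value of I915_GTT_PAGE_SIZE */

/* One PTE slot per GGTT page, mirroring gsm + offset / page_size above. */
static uint64_t pte_slot(uint64_t offset)
{
	return offset / GTT_PAGE_SIZE;
}

int main(void)
{
	assert(pte_slot(0) == 0);
	assert(pte_slot(2 * GTT_PAGE_SIZE) == 2);	/* third page, third slot */
	return 0;
}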
2025-03-13 16:08:32 +02:00
|
|
|
static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm,
|
|
|
|
u64 offset,
|
|
|
|
bool *is_present, bool *is_local)
|
|
|
|
{
|
|
|
|
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
|
|
|
|
gen6_pte_t __iomem *pte =
|
|
|
|
(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
|
|
|
|
|
|
|
|
return vm->pte_decode(ioread32(pte), is_present, is_local);
|
|
|
|
}
|
|
|
|
|
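For illustration only, a hypothetical decode mirroring the read path above. The
bit positions are assumptions made up for this sketch, not the real gen6 PTE
layout; decode_pte is an invented name.
#include <stdint.h>
#include <stdio.h>

/* Hypothetical PTE decode: bit 0 as a present bit, the low 12 bits as
 * flags, the remainder as the page address. Assumed layout, for
 * illustration only. */
static uint64_t decode_pte(uint32_t pte, int *is_present)
{
	*is_present = pte & 1;			/* assumed present bit */
	return pte & ~(uint32_t)0xfff;		/* strip assumed flag bits */
}

int main(void)
{
	int present;
	uint64_t addr = decode_pte(0x12345001u, &present);

	printf("present=%d addr=%#llx\n", present, (unsigned long long)addr);
	return 0;
}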
2022-06-17 16:05:59 -07:00
|
|
|
/*
|
|
|
|
* Binds an object into the global gtt with the specified caching attributes.
|
|
|
|
* The object will be accessible to the GPU via commands whose operands
|
|
|
|
* reference offsets within the global GTT as well as accessible by the GPU
|
|
|
|
* through the GMADR mapped BAR (i915->mm.gtt->gtt).
|
|
|
|
*/
|
|
|
|
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
|
|
|
|
struct i915_vma_resource *vma_res,
|
2023-05-09 09:52:00 -07:00
|
|
|
unsigned int pat_index,
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 flags)
|
|
|
|
{
|
|
|
|
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
|
|
|
|
gen6_pte_t __iomem *gte;
|
|
|
|
gen6_pte_t __iomem *end;
|
|
|
|
struct sgt_iter iter;
|
|
|
|
dma_addr_t addr;
|
|
|
|
|
|
|
|
gte = (gen6_pte_t __iomem *)ggtt->gsm;
|
drm/i915: Introduce guard pages to i915_vma
Introduce the concept of padding the i915_vma with guard pages before
and after. The major consequence is that all ordinary uses of i915_vma
must use i915_vma_offset/i915_vma_size and not i915_vma.node.start/size
directly, as the drm_mm_node will include the guard pages that surround
our object.
The biggest conundrum is how exactly to mix requesting a fixed address
with guard pages, particularly through the existing uABI. The user does
not know about guard pages, so they must be transparent to the user, and
so the execobj.offset must be that of the object itself, excluding the
guard. A PIN_OFFSET_FIXED must then be exclusive of the guard pages.
The caveat is that some placements will be impossible with guard pages,
as wraparounds need to be avoided and the vma itself will require a
larger node. We must not report EINVAL but ENOSPC, as these are unavailable
locations within the GTT rather than conflicting user requirements.
In the next patch, we start using guard pages for scanout objects. While
these are limited to GGTT vma, on a few platforms these vma (or at least
an alias of the vma) are shared with userspace, so we may leak the
existence of such guards if we are not careful to ensure that the
execobj.offset is transparent and excludes the guards. (On platforms
like ivb, without full-ppgtt, userspace has to use relocations, so the
presence of more untouchable regions within its GTT should be of no
further issue.)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tejas Upadhyay <tejaskumarx.surendrakumar.upadhyay@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221201203912.346110-1-andi.shyti@linux.intel.com
2022-12-01 21:39:12 +01:00
|
|
|
gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
|
2022-06-17 16:05:59 -07:00
|
|
|
|
2022-12-01 21:39:12 +01:00
|
|
|
end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
|
|
|
|
while (gte < end)
|
|
|
|
iowrite32(vm->scratch[0]->encode, gte++);
|
|
|
|
end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
|
2022-06-17 16:05:59 -07:00
|
|
|
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
|
2023-05-09 09:52:00 -07:00
|
|
|
iowrite32(vm->pte_encode(addr, pat_index, flags), gte++);
|
2022-06-17 16:05:59 -07:00
|
|
|
GEM_BUG_ON(gte > end);
|
|
|
|
|
|
|
|
/* Fill the allocated but "unused" space beyond the end of the buffer */
|
|
|
|
while (gte < end)
|
|
|
|
iowrite32(vm->scratch[0]->encode, gte++);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We want to flush the TLBs only after we're certain all the PTE
|
|
|
|
* updates have finished.
|
|
|
|
*/
|
|
|
|
ggtt->invalidate(ggtt);
|
|
|
|
}
|
|
|
|
|
|
|
|
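A userspace model of the write pattern above, under assumed names: scratch PTEs
across the leading guard, real PTEs for the object's pages, then scratch again
across the trailing guard. In the real function the trailing fill also covers
allocated-but-unused space beyond the end of the buffer.
#include <stdint.h>
#include <stdio.h>

#define SCRATCH_PTE 0u	/* stands in for vm->scratch[0]->encode */

static void fill_node(uint32_t *gte, unsigned int guard, unsigned int payload)
{
	unsigned int i = 0, total = guard + payload + guard;

	while (i < guard)		/* leading guard pages -> scratch */
		gte[i++] = SCRATCH_PTE;
	while (i < guard + payload) {	/* the object's pages -> real PTEs */
		gte[i] = 0x1000u * i;	/* fake address-encoding PTE */
		i++;
	}
	while (i < total)		/* trailing guard -> scratch */
		gte[i++] = SCRATCH_PTE;
}

int main(void)
{
	uint32_t gte[7] = { 0 };

	fill_node(gte, 2, 3);	/* 2 guard pages around a 3-page object */
	for (unsigned int i = 0; i < 7; i++)
		printf("slot %u: %#x\n", i, gte[i]);
	return 0;
}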
static void nop_clear_range(struct i915_address_space *vm,
|
|
|
|
u64 start, u64 length)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Make sure the internal GAM fifo has been cleared of all GTT
|
|
|
|
* writes before exiting stop_machine(). This guarantees that
|
|
|
|
* any aperture accesses waiting to start in another process
|
|
|
|
* cannot back up behind the GTT writes causing a hang.
|
|
|
|
* The register can be any arbitrary GAM register.
|
|
|
|
*/
|
|
|
|
intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
|
|
|
|
}
|
|
|
|
|
|
|
|
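A sketch of the posting-read idea the workaround above relies on: an uncached
read back from the device cannot return until earlier posted writes have
drained, so the read acts as an ordering point. The parameter name here is a
stand-in, not a real register.
#include <stdint.h>

/* Reading back any device register forces earlier posted writes to
 * complete before the read returns; the value itself is discarded. */
static inline void flush_posted_writes(const volatile uint32_t *gam_reg)
{
	(void)*gam_reg;		/* posting read, result ignored */
}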
struct insert_page {
|
|
|
|
struct i915_address_space *vm;
|
|
|
|
dma_addr_t addr;
|
|
|
|
u64 offset;
|
2023-05-09 09:52:00 -07:00
|
|
|
unsigned int pat_index;
|
2022-06-17 16:05:59 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
|
|
|
|
{
|
|
|
|
struct insert_page *arg = _arg;
|
|
|
|
|
2023-05-09 09:52:00 -07:00
|
|
|
gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset,
|
|
|
|
arg->pat_index, 0);
|
2022-06-17 16:05:59 -07:00
|
|
|
bxt_vtd_ggtt_wa(arg->vm);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
|
|
|
|
dma_addr_t addr,
|
|
|
|
u64 offset,
|
2023-05-09 09:52:00 -07:00
|
|
|
unsigned int pat_index,
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 unused)
|
|
|
|
{
|
2023-05-09 09:52:00 -07:00
|
|
|
struct insert_page arg = { vm, addr, offset, pat_index };
|
2022-06-17 16:05:59 -07:00
|
|
|
|
|
|
|
stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
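The struct-plus-callback shape above exists because stop_machine() hands its
callback a single void *, so multiple arguments are marshalled through a
struct. A standalone sketch of the same shape, with run_serialised standing in
for stop_machine(fn, data, NULL):
#include <stdio.h>

struct args { int a; int b; };

static int cb(void *_arg)
{
	struct args *arg = _arg;	/* unpack the marshalled arguments */

	printf("%d + %d = %d\n", arg->a, arg->b, arg->a + arg->b);
	return 0;
}

static int run_serialised(int (*fn)(void *), void *data)
{
	/* In the kernel this would be stop_machine(fn, data, NULL),
	 * which holds all other CPUs while fn runs. */
	return fn(data);
}

int main(void)
{
	struct args arg = { 2, 3 };

	return run_serialised(cb, &arg);
}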
struct insert_entries {
|
|
|
|
struct i915_address_space *vm;
|
|
|
|
struct i915_vma_resource *vma_res;
|
2023-05-09 09:52:00 -07:00
|
|
|
unsigned int pat_index;
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 flags;
|
|
|
|
};
|
|
|
|
|
|
|
|
static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
|
|
|
|
{
|
|
|
|
struct insert_entries *arg = _arg;
|
|
|
|
|
2023-05-09 09:52:00 -07:00
|
|
|
gen8_ggtt_insert_entries(arg->vm, arg->vma_res,
|
|
|
|
arg->pat_index, arg->flags);
|
2022-06-17 16:05:59 -07:00
|
|
|
bxt_vtd_ggtt_wa(arg->vm);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
|
|
|
|
struct i915_vma_resource *vma_res,
|
2023-05-09 09:52:00 -07:00
|
|
|
unsigned int pat_index,
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 flags)
|
|
|
|
{
|
2023-05-09 09:52:00 -07:00
|
|
|
struct insert_entries arg = { vm, vma_res, pat_index, flags };
|
2022-06-17 16:05:59 -07:00
|
|
|
|
|
|
|
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
|
|
|
|
u64 start, u64 length)
|
|
|
|
{
|
|
|
|
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
|
|
|
|
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
|
|
|
|
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
|
|
|
|
gen6_pte_t scratch_pte, __iomem *gtt_base =
|
|
|
|
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
|
|
|
|
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (WARN(num_entries > max_entries,
|
|
|
|
"First entry = %d; Num entries = %d (max=%d)\n",
|
|
|
|
first_entry, num_entries, max_entries))
|
|
|
|
num_entries = max_entries;
|
|
|
|
|
|
|
|
scratch_pte = vm->scratch[0]->encode;
|
|
|
|
for (i = 0; i < num_entries; i++)
|
|
|
|
iowrite32(scratch_pte, &gtt_base[i]);
|
|
|
|
}
|
|
|
|
|
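A worked example of the entry arithmetic above, assuming 4 KiB GTT pages:
clearing 64 KiB starting at a 1 MiB offset touches PTE slots 256 through 271.
#include <assert.h>

int main(void)
{
	const unsigned int page = 4096;			/* assumed page size */
	unsigned int first = (1u << 20) / page;		/* 1 MiB start -> slot 256 */
	unsigned int num   = (64u << 10) / page;	/* 64 KiB length -> 16 slots */

	assert(first == 256 && num == 16);
	return 0;
}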
2022-03-30 16:48:08 -07:00
|
|
|
void intel_ggtt_bind_vma(struct i915_address_space *vm,
|
2022-06-17 16:05:59 -07:00
|
|
|
struct i915_vm_pt_stash *stash,
|
|
|
|
struct i915_vma_resource *vma_res,
|
2023-05-09 09:52:00 -07:00
|
|
|
unsigned int pat_index,
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 flags)
|
2020-01-07 13:40:09 +00:00
|
|
|
{
|
|
|
|
u32 pte_flags;
|
|
|
|
|
2022-01-10 18:22:15 +01:00
|
|
|
if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
|
2020-07-29 17:42:17 +01:00
|
|
|
return;
|
2020-05-25 08:53:36 +01:00
|
|
|
|
2022-01-10 18:22:15 +01:00
|
|
|
vma_res->bound_flags |= flags;
|
|
|
|
|
2020-01-07 13:40:09 +00:00
|
|
|
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
|
|
|
|
pte_flags = 0;
|
2022-01-10 18:22:15 +01:00
|
|
|
if (vma_res->bi.readonly)
|
2020-01-07 13:40:09 +00:00
|
|
|
pte_flags |= PTE_READ_ONLY;
|
2022-01-10 18:22:15 +01:00
|
|
|
if (vma_res->bi.lmem)
|
2021-02-03 17:12:31 +00:00
|
|
|
pte_flags |= PTE_LM;
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2023-05-09 09:52:00 -07:00
|
|
|
vm->insert_entries(vm, vma_res, pat_index, pte_flags);
|
2022-01-10 18:22:15 +01:00
|
|
|
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
|
2020-01-07 13:40:09 +00:00
|
|
|
}
|
|
|
|
|
2022-03-30 16:48:08 -07:00
|
|
|
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
|
2022-06-17 16:05:59 -07:00
|
|
|
struct i915_vma_resource *vma_res)
|
2020-01-07 13:40:09 +00:00
|
|
|
{
|
2022-01-10 18:22:15 +01:00
|
|
|
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
|
2020-01-07 13:40:09 +00:00
|
|
|
}
|
|
|
|
|
2025-03-13 16:08:32 +02:00
|
|
|
dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
|
|
|
|
u64 offset, bool *is_present, bool *is_local)
|
|
|
|
{
|
|
|
|
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
|
|
|
|
|
|
|
|
return ggtt->vm.read_entry(vm, offset, is_present, is_local);
|
|
|
|
}
|
|
|
|
|
2023-09-02 17:10:39 +02:00
|
|
|
/*
|
|
|
|
* Reserve the top of the GuC address space for firmware images. Addresses
|
|
|
|
* beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
|
|
|
|
* which makes for a suitable range to hold GuC/HuC firmware images if the
|
|
|
|
* size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
|
|
|
|
* is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk
|
|
|
|
* of the same size anyway, which is far more than needed, to keep the logic
|
|
|
|
* in uc_fw_ggtt_offset() simple.
|
|
|
|
*/
|
|
|
|
#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
|
|
|
|
|
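A worked check of the reservation size, assuming 0xFEE00000 as the value of
GUC_GGTT_TOP (an assumption for this sketch): SZ_4G - GUC_GGTT_TOP works out to
0x1200000 bytes, i.e. an 18 MiB chunk at the very top of a 4 GiB GGTT.
#include <assert.h>

int main(void)
{
	const unsigned long long guc_ggtt_top = 0xFEE00000ull;	/* assumed */
	const unsigned long long sz_4g = 1ull << 32;

	assert(sz_4g - guc_ggtt_top == 18ull << 20);	/* 18 MiB reserve */
	return 0;
}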
2020-01-07 13:40:09 +00:00
|
|
|
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
|
|
|
|
{
|
2023-09-02 17:10:39 +02:00
|
|
|
u64 offset;
|
2020-01-07 13:40:09 +00:00
|
|
|
int ret;
|
|
|
|
|
2020-02-18 14:33:19 -08:00
|
|
|
if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
|
2020-01-07 13:40:09 +00:00
|
|
|
return 0;
|
|
|
|
|
2023-09-02 17:10:39 +02:00
|
|
|
GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
|
|
|
|
offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2023-09-02 17:10:39 +02:00
|
|
|
ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
|
|
|
|
GUC_TOP_RESERVE_SIZE, offset,
|
|
|
|
I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
|
2020-01-07 13:40:09 +00:00
|
|
|
if (ret)
|
2020-01-28 10:14:31 +03:00
|
|
|
drm_dbg(&ggtt->vm.i915->drm,
|
|
|
|
"Failed to reserve top of GGTT for GuC\n");
|
2020-01-07 13:40:09 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
|
|
|
|
{
|
|
|
|
if (drm_mm_node_allocated(&ggtt->uc_fw))
|
|
|
|
drm_mm_remove_node(&ggtt->uc_fw);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
|
|
|
|
{
|
|
|
|
ggtt_release_guc_top(ggtt);
|
|
|
|
if (drm_mm_node_allocated(&ggtt->error_capture))
|
|
|
|
drm_mm_remove_node(&ggtt->error_capture);
|
2020-01-10 12:30:56 +00:00
|
|
|
mutex_destroy(&ggtt->error_mutex);
|
2020-01-07 13:40:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int init_ggtt(struct i915_ggtt *ggtt)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Let GEM Manage all of the aperture.
|
|
|
|
*
|
|
|
|
* However, leave one page at the end still bound to the scratch page.
|
|
|
|
* There are a number of places where the hardware apparently prefetches
|
|
|
|
* past the end of the object, and we've seen multiple hangs with the
|
|
|
|
* GPU head pointer stuck in a batchbuffer bound at the last page of the
|
|
|
|
* aperture. One page should be enough to keep any prefetching inside
|
|
|
|
* of the aperture.
|
|
|
|
*/
|
|
|
|
unsigned long hole_start, hole_end;
|
|
|
|
struct drm_mm_node *entry;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* GuC requires all resources that we're sharing with it to be placed in
|
|
|
|
* non-WOPCM memory. If GuC is not present or not in use we still need a
|
|
|
|
* small bias as ring wraparound at offset 0 sometimes hangs. No idea
|
|
|
|
* why.
|
|
|
|
*/
|
|
|
|
ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
|
2022-11-07 18:05:58 -08:00
|
|
|
intel_wopcm_guc_size(&ggtt->vm.gt->wopcm));
|
2020-01-07 13:40:09 +00:00
|
|
|
|
|
|
|
ret = intel_vgt_balloon(ggtt);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2020-01-10 12:30:56 +00:00
|
|
|
mutex_init(&ggtt->error_mutex);
|
2020-01-07 13:40:09 +00:00
|
|
|
if (ggtt->mappable_end) {
|
2021-01-25 12:50:33 +00:00
|
|
|
/*
|
|
|
|
* Reserve a mappable slot for our lockless error capture.
|
|
|
|
*
|
|
|
|
* We strongly prefer taking address 0x0 in order to protect
|
|
|
|
* other critical buffers against accidental overwrites,
|
|
|
|
* as writing to address 0 is a very common mistake.
|
|
|
|
*
|
|
|
|
* Since 0 may already be in use by the system (e.g. the BIOS
|
|
|
|
* framebuffer), we let the reservation fail quietly and hope
|
|
|
|
* 0 remains reserved always.
|
|
|
|
*
|
|
|
|
* If we fail to reserve 0, and then fail to find any space
|
|
|
|
* for an error-capture, remain silent. We can afford not
|
|
|
|
* to reserve an error_capture node as we have fallback
|
|
|
|
* paths, and we trust that 0 will remain reserved. However,
|
|
|
|
* the only likely reason for failure to insert is a driver
|
|
|
|
* bug, which we expect to cause other failures...
|
2023-03-10 10:23:50 +01:00
|
|
|
*
|
|
|
|
* Since CPU can perform speculative reads on error capture
|
|
|
|
* (write-combining allows it) add scratch page after error
|
|
|
|
* capture to avoid DMAR errors.
|
2021-01-25 12:50:33 +00:00
|
|
|
*/
|
2023-03-10 10:23:50 +01:00
|
|
|
ggtt->error_capture.size = 2 * I915_GTT_PAGE_SIZE;
|
2021-01-25 12:50:33 +00:00
|
|
|
ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
|
|
|
|
if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
|
|
|
|
drm_mm_insert_node_in_range(&ggtt->vm.mm,
|
|
|
|
&ggtt->error_capture,
|
|
|
|
ggtt->error_capture.size, 0,
|
|
|
|
ggtt->error_capture.color,
|
|
|
|
0, ggtt->mappable_end,
|
|
|
|
DRM_MM_INSERT_LOW);
|
2020-01-07 13:40:09 +00:00
|
|
|
}
|
2023-03-10 10:23:50 +01:00
|
|
|
if (drm_mm_node_allocated(&ggtt->error_capture)) {
|
|
|
|
u64 start = ggtt->error_capture.start;
|
|
|
|
u64 size = ggtt->error_capture.size;
|
|
|
|
|
|
|
|
ggtt->vm.scratch_range(&ggtt->vm, start, size);
|
2021-01-25 12:50:33 +00:00
|
|
|
drm_dbg(&ggtt->vm.i915->drm,
|
|
|
|
"Reserved GGTT:[%llx, %llx] for use by error capture\n",
|
2023-03-10 10:23:50 +01:00
|
|
|
start, start + size);
|
|
|
|
}
|
2020-01-07 13:40:09 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The upper portion of the GuC address space has a sizeable hole
|
|
|
|
* (several MB) that is inaccessible by GuC. Reserve this range within
|
|
|
|
* GGTT as it can comfortably hold GuC/HuC firmware images.
|
|
|
|
*/
|
|
|
|
ret = ggtt_reserve_guc_top(ggtt);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
/* Clear any non-preallocated blocks */
|
|
|
|
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
|
2021-01-25 12:50:33 +00:00
|
|
|
drm_dbg(&ggtt->vm.i915->drm,
|
|
|
|
"clearing unused GTT space: [%lx, %lx]\n",
|
|
|
|
hole_start, hole_end);
|
2020-01-07 13:40:09 +00:00
|
|
|
ggtt->vm.clear_range(&ggtt->vm, hole_start,
|
|
|
|
hole_end - hole_start);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* And finally clear the reserved guard page */
|
|
|
|
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
|
|
|
cleanup_init_ggtt(ggtt);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-07-29 17:42:17 +01:00
|
|
|
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
|
|
|
|
struct i915_vm_pt_stash *stash,
|
2022-01-10 18:22:15 +01:00
|
|
|
struct i915_vma_resource *vma_res,
|
2023-05-09 09:52:00 -07:00
|
|
|
unsigned int pat_index,
|
2020-07-29 17:42:17 +01:00
|
|
|
u32 flags)
|
2020-01-07 13:40:09 +00:00
|
|
|
{
|
|
|
|
u32 pte_flags;
|
|
|
|
|
|
|
|
/* Currently applicable only to VLV */
|
|
|
|
pte_flags = 0;
|
2022-01-10 18:22:15 +01:00
|
|
|
if (vma_res->bi.readonly)
|
2020-01-07 13:40:09 +00:00
|
|
|
pte_flags |= PTE_READ_ONLY;
|
|
|
|
|
2020-07-29 17:42:17 +01:00
|
|
|
if (flags & I915_VMA_LOCAL_BIND)
|
|
|
|
ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
|
drm/i915: use pat_index instead of cache_level
Currently the KMD is using enum i915_cache_level to set caching policy for
buffer objects. This is flaky because the PAT index which really controls
the caching behavior in PTE has far more levels than what's defined in the
enum. In addition, the PAT index is platform dependent, having to translate
between i915_cache_level and PAT index is not reliable, and makes the code
more complicated.
From UMD's perspective there is also a necessity to set caching policy for
performance fine tuning. It's much easier for the UMD to directly use PAT
index because the behavior of each PAT index is clearly defined in Bspec.
Having the abstracted i915_cache_level sitting in between would only cause
more ambiguity. PAT is expected to work much like MOCS already works today,
and by design userspace is expected to select the index that exactly
matches the desired behavior described in the hardware specification.
For these reasons this patch replaces i915_cache_level with PAT index. Also
note, the cache_level is not completely removed yet, because the KMD still
has the need of creating buffer objects with simple cache settings such as
cached, uncached, or writethrough. For kernel objects, cache_level is used
for simplicity and backward compatibility. For Pre-gen12 platforms PAT can
have 1:1 mapping to i915_cache_level, so these two are interchangeable. see
the use of LEGACY_CACHELEVEL.
One consequence of this change is that gen8_pte_encode is no longer working
for gen12 platforms due to the fact that gen12 platforms has different PAT
definitions. In the meantime the mtl_pte_encode introduced specfically for
MTL becomes generic for all gen12 platforms. This patch renames the MTL
PTE encode function into gen12_pte_encode and apply it to all gen12. Even
though this change looks unrelated, but separating them would temporarily
break gen12 PTE encoding, thus squash them in one patch.
Special note: this patch changes the way caching behavior is controlled in
the sense that some objects are left to be managed by userspace. For such
objects we need to be careful not to change the userspace settings.There
are kerneldoc and comments added around obj->cache_coherent, cache_dirty,
and how to bypass the checkings by i915_gem_object_has_cache_level. For
full understanding, these changes need to be looked at together with the
two follow-up patches, one disables the {set|get}_caching ioctl's and the
other adds set_pat extension to the GEM_CREATE uAPI.
Bspec: 63019
Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Fei Yang <fei.yang@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230509165200.1740-3-fei.yang@intel.com
2023-05-09 09:52:00 -07:00
|
|
|
stash, vma_res, pat_index, flags);
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2020-01-10 14:44:18 +00:00
|
|
|
if (flags & I915_VMA_GLOBAL_BIND)
|
drm/i915: use pat_index instead of cache_level
Currently the KMD is using enum i915_cache_level to set caching policy for
buffer objects. This is flaky because the PAT index which really controls
the caching behavior in PTE has far more levels than what's defined in the
enum. In addition, the PAT index is platform dependent, having to translate
between i915_cache_level and PAT index is not reliable, and makes the code
more complicated.
From UMD's perspective there is also a necessity to set caching policy for
performance fine tuning. It's much easier for the UMD to directly use PAT
index because the behavior of each PAT index is clearly defined in Bspec.
Having the abstracted i915_cache_level sitting in between would only cause
more ambiguity. PAT is expected to work much like MOCS already works today,
and by design userspace is expected to select the index that exactly
matches the desired behavior described in the hardware specification.
For these reasons this patch replaces i915_cache_level with PAT index. Also
note, the cache_level is not completely removed yet, because the KMD still
has the need of creating buffer objects with simple cache settings such as
cached, uncached, or writethrough. For kernel objects, cache_level is used
for simplicity and backward compatibility. For Pre-gen12 platforms PAT can
have 1:1 mapping to i915_cache_level, so these two are interchangeable. see
the use of LEGACY_CACHELEVEL.
One consequence of this change is that gen8_pte_encode is no longer working
for gen12 platforms due to the fact that gen12 platforms has different PAT
definitions. In the meantime the mtl_pte_encode introduced specfically for
MTL becomes generic for all gen12 platforms. This patch renames the MTL
PTE encode function into gen12_pte_encode and apply it to all gen12. Even
though this change looks unrelated, but separating them would temporarily
break gen12 PTE encoding, thus squash them in one patch.
Special note: this patch changes the way caching behavior is controlled in
the sense that some objects are left to be managed by userspace. For such
objects we need to be careful not to change the userspace settings.There
are kerneldoc and comments added around obj->cache_coherent, cache_dirty,
and how to bypass the checkings by i915_gem_object_has_cache_level. For
full understanding, these changes need to be looked at together with the
two follow-up patches, one disables the {set|get}_caching ioctl's and the
other adds set_pat extension to the GEM_CREATE uAPI.
Bspec: 63019
Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Fei Yang <fei.yang@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230509165200.1740-3-fei.yang@intel.com
2023-05-09 09:52:00 -07:00
|
|
|
vm->insert_entries(vm, vma_res, pat_index, pte_flags);
|
2022-01-10 18:22:15 +01:00
|
|
|
|
|
|
|
vma_res->bound_flags |= flags;
|
2020-01-07 13:40:09 +00:00
|
|
|
}
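
/*
 * A VMA may carry both I915_VMA_GLOBAL_BIND and I915_VMA_LOCAL_BIND at
 * once: the alias PPGTT spans the same address range as the GGTT, so the
 * two bindings share a single offset. bound_flags accumulates across
 * binds, which is why the unbind path below checks each flag
 * independently.
 */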

static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma_resource *vma_res)
{
	if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
		vm->clear_range(vm, vma_res->start, vma_res->vma_size);

	if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
}

static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
	err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
	if (err)
		goto err_stash;

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	return 0;

err_stash:
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}
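
/*
 * The stash pattern above front-loads all page-table allocation:
 * i915_vm_alloc_pt_stash() grabs enough pages to cover the whole
 * [0, ggtt->vm.total) range, so allocate_va_range() can consume them
 * without any further allocation (and hence without failing).
 */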

static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma   = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(to_gt(i915)->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
		if (ret)
			cleanup_init_ggtt(to_gt(i915)->ggtt);
	}

	return 0;
}
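
/*
 * Note that an init_aliasing_ppgtt() failure above is not propagated:
 * the init_ggtt() reservations are unwound but i915_init_ggtt() still
 * returns 0, leaving the driver to carry on without the alias.
 */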

static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	flush_workqueue(ggtt->vm.i915->wq);
	i915_gem_drain_freed_objects(ggtt->vm.i915);

	mutex_lock(&ggtt->vm.mutex);

	ggtt->vm.skip_pte_rewrite = true;

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		bool trylock;

		trylock = i915_gem_object_trylock(obj, NULL);
		WARN_ON(!trylock);

		WARN_ON(__i915_vma_unbind(vma));
		if (trylock)
			i915_gem_object_unlock(obj);
	}

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);

	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}
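
/*
 * The object trylock in the teardown loop above is expected to succeed:
 * by the time ggtt_cleanup_hw() runs the driver is unloading and nobody
 * else should be holding object locks, hence the WARN rather than a
 * blocking lock that could deadlock a broken unload.
 */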

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}

/**
 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
 * all free objects have been drained.
 * @i915: i915 device
 */
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
	dma_resv_fini(&ggtt->vm._resv);
}

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}
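
/*
 * Worked example for the decode above, using an illustrative GGMS value:
 * GGMS == 2 selects 2 << 20 = 2 MiB of GTT; at 4 bytes per gen6 PTE that
 * is 512Ki entries, i.e. a 2 GiB mappable address space.
 */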

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;

	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}
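
/*
 * Unlike gen6, the BDW GGMS field is log2-encoded: a raw value n selects
 * 1 << n MiB of GTT. For example, n == 3 gives 8 MiB of 8-byte gen8
 * PTEs, which at 4 KiB per entry maps a 4 GiB address space.
 */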

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
{
	/*
	 * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
	 * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
	 */
	GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
	return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
}

static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
{
	return gen6_gttmmadr_size(i915) / 2;
}
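
/*
 * In other words, the GTTMMADR BAR is split in half: the low half holds
 * the MMIO registers and the high half (GTTADR) exposes the GGTT PTEs,
 * which is why the GTT offset is simply half the BAR size.
 */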

static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));

	if (i915_direct_stolen_access(i915)) {
		drm_dbg(&i915->drm, "Using direct GSM access\n");
		phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK;
	} else {
		phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
	}

	if (needs_wc_ggtt_mapping(i915))
		ggtt->gsm = ioremap_wc(phys_addr, size);
	else
		ggtt->gsm = ioremap(phys_addr, size);

	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	kref_init(&ggtt->vm.resv_ref);
	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;

	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    i915_gem_get_pat_index(i915,
							   I915_CACHE_NONE),
				    pte_flags);

	return 0;
}
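
/*
 * The scratch PTE computed above is cached in scratch[0]->encode so that
 * pointing a range at scratch is just a loop storing one precomputed PTE
 * value per entry, with no per-page encode calls.
 */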

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return DEFINE_RES_MEM(pci_resource_start(pdev, bar),
			      pci_resource_len(pdev, bar));
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	if (!HAS_LMEM(i915) && !HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		if (!i915_pci_resource_valid(pdev, GEN4_GMADR_BAR))
			return -ENXIO;

		ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
	ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	ggtt->vm.scratch_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
	ggtt->vm.read_entry = gen8_ggtt_read_entry;

	/*
	 * Serialize GTT updates with aperture access on BXT if VT-d is on,
	 * and always on CHV.
	 */
	if (intel_vm_no_concurrent_access_wa(i915)) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;

		/*
		 * Calling the stop_machine() version of the GGTT update
		 * functions at error capture/reset time would raise a
		 * lockdep warning. Allow calling gen8_ggtt_insert_*
		 * directly at reset, which is safe from parallel GGTT
		 * updates.
		 */
		ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
		ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;

		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	if (i915_ggtt_require_binder(i915)) {
		ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind;
		ggtt->vm.insert_page = gen8_ggtt_insert_page_bind;
		ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind;
		/*
		 * When the GPU is hung, we might bind VMAs for error
		 * capture. Fall back to CPU GGTT updates in that case.
		 */
		ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
	}

	if (intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
		ggtt->invalidate = guc_ggtt_invalidate;
	else
		ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
		ggtt->vm.pte_encode = mtl_ggtt_pte_encode;
	else
		ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	ggtt->vm.pte_decode = gen8_ggtt_pte_decode;

	return ggtt_probe_common(ggtt, size);
}
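
/*
 * Note that clear_range is wired to nop_clear_range above: PTEs left
 * stale in an unbound range are never accessed, so skipping the rewrite
 * keeps unbinds cheap. scratch_range remains available for the paths
 * that really must point entries at the scratch page.
 */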

/*
 * For pre-gen8 platforms pat_index is the same as enum i915_cache_level,
 * so the switch-case statements in these PTE encode functions are still
 * valid. See translation table LEGACY_CACHELEVEL.
 */
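
/*
 * As a sketch of that table (the authoritative definition lives with the
 * device info; this is only the expected identity mapping):
 *
 *	.cachelevel_to_pat = {
 *		[I915_CACHE_NONE]   = 0,
 *		[I915_CACHE_LLC]    = 1,
 *		[I915_CACHE_L3_LLC] = 2,
 *		[I915_CACHE_WT]     = 3,
 *	}
 *
 * which is what makes switching on pat_index with I915_CACHE_* labels
 * below legal.
 */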

static u64 snb_pte_encode(dma_addr_t addr,
			  unsigned int pat_index,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (pat_index) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(pat_index);
	}

	return pte;
}
|
|
|
|
|
|
|
|
static u64 ivb_pte_encode(dma_addr_t addr,
|
drm/i915/gt: Fix second parameter type of pre-gen8 pte_encode callbacks
When booting a kernel compiled with CONFIG_CFI_CLANG (kCFI), there is a
CFI failure in ggtt_probe_common() when trying to call hsw_pte_encode()
via an indirect call:
[ 5.030027] CFI failure at ggtt_probe_common+0xd1/0x130 [i915] (target: hsw_pte_encode+0x0/0x30 [i915]; expected type: 0xf5c1d0fc)
With kCFI, indirect calls are validated against their expected type
versus actual type and failures occur when the two types do not match.
clang's -Wincompatible-function-pointer-types-strict can catch this at
compile time but it is not enabled for the kernel yet:
drivers/gpu/drm/i915/gt/intel_ggtt.c:1155:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = iris_pte_encode;
^ ~~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1157:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = hsw_pte_encode;
^ ~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1159:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = byt_pte_encode;
^ ~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1161:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = ivb_pte_encode;
^ ~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1163:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = snb_pte_encode;
^ ~~~~~~~~~~~~~~
5 errors generated.
In this case, the pre-gen8 pte_encode functions have a second parameter
type of 'enum i915_cache_level' whereas the function pointer prototype
in 'struct i915_address_space' expects a second parameter type of
'unsigned int'.
Update the second parameter of the callbacks and the comment above them
noting that these statements are still valid, which matches other
functions and files, to clear up the kCFI failures at run time.
Fixes: 9275277d5324 ("drm/i915: use pat_index instead of cache_level")
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Fei Yang <fei.yang@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230530-i915-gt-cache_level-wincompatible-function-pointer-types-strict-v1-1-54501d598229@kernel.org
2023-05-30 11:24:38 -07:00
|
|
|
unsigned int pat_index,
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 flags)
|
|
|
|
{
|
|
|
|
gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
|
|
|
|
|
drm/i915/gt: Fix second parameter type of pre-gen8 pte_encode callbacks
When booting a kernel compiled with CONFIG_CFI_CLANG (kCFI), there is a
CFI failure in ggtt_probe_common() when trying to call hsw_pte_encode()
via an indirect call:
[ 5.030027] CFI failure at ggtt_probe_common+0xd1/0x130 [i915] (target: hsw_pte_encode+0x0/0x30 [i915]; expected type: 0xf5c1d0fc)
With kCFI, indirect calls are validated against their expected type
versus actual type and failures occur when the two types do not match.
clang's -Wincompatible-function-pointer-types-strict can catch this at
compile time but it is not enabled for the kernel yet:
drivers/gpu/drm/i915/gt/intel_ggtt.c:1155:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = iris_pte_encode;
^ ~~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1157:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = hsw_pte_encode;
^ ~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1159:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = byt_pte_encode;
^ ~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1161:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = ivb_pte_encode;
^ ~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1163:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = snb_pte_encode;
^ ~~~~~~~~~~~~~~
5 errors generated.
In this case, the pre-gen8 pte_encode functions have a second parameter
type of 'enum i915_cache_level' whereas the function pointer prototype
in 'struct i915_address_space' expects a second parameter type of
'unsigned int'.
Update the second parameter of the callbacks and the comment above them
noting that these statements are still valid, which matches other
functions and files, to clear up the kCFI failures at run time.
Fixes: 9275277d5324 ("drm/i915: use pat_index instead of cache_level")
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Fei Yang <fei.yang@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230530-i915-gt-cache_level-wincompatible-function-pointer-types-strict-v1-1-54501d598229@kernel.org
2023-05-30 11:24:38 -07:00
|
|
|
switch (pat_index) {
|
2022-06-17 16:05:59 -07:00
|
|
|
case I915_CACHE_L3_LLC:
|
|
|
|
pte |= GEN7_PTE_CACHE_L3_LLC;
|
|
|
|
break;
|
|
|
|
case I915_CACHE_LLC:
|
|
|
|
pte |= GEN6_PTE_CACHE_LLC;
|
|
|
|
break;
|
|
|
|
case I915_CACHE_NONE:
|
|
|
|
pte |= GEN6_PTE_UNCACHED;
|
|
|
|
break;
|
|
|
|
default:
|
drm/i915/gt: Fix second parameter type of pre-gen8 pte_encode callbacks
When booting a kernel compiled with CONFIG_CFI_CLANG (kCFI), there is a
CFI failure in ggtt_probe_common() when trying to call hsw_pte_encode()
via an indirect call:
[ 5.030027] CFI failure at ggtt_probe_common+0xd1/0x130 [i915] (target: hsw_pte_encode+0x0/0x30 [i915]; expected type: 0xf5c1d0fc)
With kCFI, indirect calls are validated against their expected type
versus actual type and failures occur when the two types do not match.
clang's -Wincompatible-function-pointer-types-strict can catch this at
compile time but it is not enabled for the kernel yet:
drivers/gpu/drm/i915/gt/intel_ggtt.c:1155:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = iris_pte_encode;
^ ~~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1157:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = hsw_pte_encode;
^ ~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1159:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = byt_pte_encode;
^ ~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1161:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = ivb_pte_encode;
^ ~~~~~~~~~~~~~~
drivers/gpu/drm/i915/gt/intel_ggtt.c:1163:23: error: incompatible function pointer types assigning to 'u64 (*)(dma_addr_t, unsigned int, u32)' (aka 'unsigned long long (*)(unsigned int, unsigned int, unsigned int)') from 'u64 (dma_addr_t,
enum i915_cache_level, u32)' (aka 'unsigned long long (unsigned int, enum i915_cache_level, unsigned int)') [-Werror,-Wincompatible-function-pointer-types-strict]
ggtt->vm.pte_encode = snb_pte_encode;
^ ~~~~~~~~~~~~~~
5 errors generated.
In this case, the pre-gen8 pte_encode functions have a second parameter
type of 'enum i915_cache_level' whereas the function pointer prototype
in 'struct i915_address_space' expects a second parameter type of
'unsigned int'.
Update the second parameter of the callbacks, along with the comment above
them noting that these statements are still valid, to match other functions
and files and clear up the kCFI failures at run time.
Fixes: 9275277d5324 ("drm/i915: use pat_index instead of cache_level")
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Fei Yang <fei.yang@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230530-i915-gt-cache_level-wincompatible-function-pointer-types-strict-v1-1-54501d598229@kernel.org
2023-05-30 11:24:38 -07:00
|
|
|
MISSING_CASE(pat_index);
|
2022-06-17 16:05:59 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
return pte;
|
|
|
|
}
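Below is a minimal standalone sketch of the type mismatch the kCFI commit
message above describes. All names and typedefs here are hypothetical
stand-ins, not driver code; it only illustrates that an enum parameter is a
distinct C type from 'unsigned int', so the assignment fails the strict
function-pointer type check even though the ABI happens to be identical.

/* Stand-ins for the kernel types, so the sketch compiles on its own. */
typedef unsigned long long sketch_u64;
typedef unsigned int sketch_dma_addr_t;

enum sketch_cache_level { SKETCH_CACHE_NONE, SKETCH_CACHE_LLC };

/* The shape the vm.pte_encode pointer expects: 'unsigned int' pat_index. */
typedef sketch_u64 (*sketch_pte_encode_fn)(sketch_dma_addr_t addr,
					   unsigned int pat_index,
					   unsigned int flags);

/* Mismatched: the enum parameter is a distinct C type from 'unsigned int'. */
static sketch_u64 sketch_bad_encode(sketch_dma_addr_t addr,
				    enum sketch_cache_level level,
				    unsigned int flags)
{
	return addr | (level != SKETCH_CACHE_NONE) | flags;
}

/* Matching: the signature is identical to the pointer type. */
static sketch_u64 sketch_good_encode(sketch_dma_addr_t addr,
				     unsigned int pat_index,
				     unsigned int flags)
{
	return addr | (pat_index != 0) | flags;
}

/*
 * sketch_pte_encode_fn fn = sketch_bad_encode;
 *         ^ rejected by -Wincompatible-function-pointer-types-strict and,
 *           with kCFI, an indirect call through 'fn' traps at run time.
 * sketch_pte_encode_fn fn = sketch_good_encode;   -- type-checks cleanly
 */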
|
|
|
|
|
|
|
|
static u64 byt_pte_encode(dma_addr_t addr,
|
2023-05-30 11:24:38 -07:00
|
|
|
unsigned int pat_index,
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 flags)
|
|
|
|
{
|
|
|
|
gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
|
|
|
|
|
|
|
|
if (!(flags & PTE_READ_ONLY))
|
|
|
|
pte |= BYT_PTE_WRITEABLE;
|
|
|
|
|
2023-05-30 11:24:38 -07:00
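/*
 * Note: comparing pat_index against the legacy enum values (here and in
 * hsw_pte_encode/iris_pte_encode below) only works because pre-gen12
 * platforms map i915_cache_level 1:1 onto PAT indexes; see the pat_index
 * commit message later in this file.
 */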
|
|
|
if (pat_index != I915_CACHE_NONE)
|
2022-06-17 16:05:59 -07:00
|
|
|
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
|
|
|
|
|
|
|
|
return pte;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u64 hsw_pte_encode(dma_addr_t addr,
|
2023-05-30 11:24:38 -07:00
|
|
|
unsigned int pat_index,
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 flags)
|
|
|
|
{
|
|
|
|
gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
|
|
|
|
|
2023-05-30 11:24:38 -07:00
|
|
|
if (pat_index != I915_CACHE_NONE)
|
2022-06-17 16:05:59 -07:00
|
|
|
pte |= HSW_WB_LLC_AGE3;
|
|
|
|
|
|
|
|
return pte;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u64 iris_pte_encode(dma_addr_t addr,
|
2023-05-30 11:24:38 -07:00
|
|
|
unsigned int pat_index,
|
2022-06-17 16:05:59 -07:00
|
|
|
u32 flags)
|
|
|
|
{
|
|
|
|
gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
|
|
|
|
|
2023-05-30 11:24:38 -07:00
|
|
|
switch (pat_index) {
|
2022-06-17 16:05:59 -07:00
|
|
|
case I915_CACHE_NONE:
|
|
|
|
break;
|
|
|
|
case I915_CACHE_WT:
|
|
|
|
pte |= HSW_WT_ELLC_LLC_AGE3;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
pte |= HSW_WB_ELLC_LLC_AGE3;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return pte;
|
|
|
|
}
|
|
|
|
|
2025-03-13 16:08:32 +02:00
|
|
|
static dma_addr_t gen6_pte_decode(u64 pte, bool *is_present, bool *is_local)
|
|
|
|
{
|
|
|
|
*is_present = pte & GEN6_PTE_VALID;
|
|
|
|
*is_local = false;
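/*
 * GEN6_PTE_ADDR_ENCODE() stores address bits 39:32 in PTE bits 11:4;
 * shift them back up and combine with the page-aligned bits 31:12.
 */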
|
|
|
|
|
|
|
|
return ((pte & 0xff0) << 28) | (pte & ~0xfff);
|
|
|
|
}
|
|
|
|
|
2022-06-17 16:05:59 -07:00
|
|
|
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = ggtt->vm.i915;
|
|
|
|
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
|
|
|
unsigned int size;
|
|
|
|
u16 snb_gmch_ctl;
|
|
|
|
|
2022-10-05 22:56:46 +03:00
|
|
|
if (!i915_pci_resource_valid(pdev, GEN4_GMADR_BAR))
|
2022-08-05 17:59:59 +02:00
|
|
|
return -ENXIO;
|
|
|
|
|
2022-10-05 22:56:46 +03:00
|
|
|
ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR);
|
2022-06-17 16:05:59 -07:00
|
|
|
ggtt->mappable_end = resource_size(&ggtt->gmadr);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* 64/512MB is the current min/max we actually know of, but this is
|
|
|
|
* just a coarse sanity check.
|
|
|
|
*/
|
|
|
|
if (ggtt->mappable_end < (64 << 20) ||
|
|
|
|
ggtt->mappable_end > (512 << 20)) {
|
|
|
|
drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
|
|
|
|
&ggtt->mappable_end);
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
|
|
|
|
|
|
|
|
size = gen6_get_total_gtt_size(snb_gmch_ctl);
|
|
|
|
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
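/*
 * gen6_get_total_gtt_size() returns the size of the PTE table in bytes;
 * each 4-byte gen6 PTE maps one 4 KiB page of GGTT address space.
 */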
|
|
|
|
|
|
|
|
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
|
|
|
|
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
|
|
|
|
|
|
|
|
ggtt->vm.clear_range = nop_clear_range;
|
drm/i915: Refine VT-d scanout workaround
VT-d may cause overfetch of the scanout PTE, both before and after the
vma (depending on the scanout orientation). bspec recommends that we
provide a tile-row in either direction, and suggests using 168 PTEs,
warning that the accesses will wrap around the ends of the GGTT.
Currently, we fill the entire GGTT with scratch pages when using VT-d to
always ensure there are valid entries around every vma, including
scanout. However, writing every PTE is slow as on recent devices we
perform 8MiB of uncached writes, incurring an extra 100ms during resume.
If instead we focus on only putting guard pages around scanout, we can
avoid touching the whole GGTT. To avoid having to introduce extra nodes
around each scanout vma, we adjust the scanout drm_mm_node to be smaller
than the allocated space, and fixup the extra PTE during dma binding.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tejas Upadhyay <tejaskumarx.surendrakumar.upadhyay@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221130235805.221010-5-andi.shyti@linux.intel.com
2022-12-01 00:58:04 +01:00
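/*
 * Rationale as understood (an assumption, not stated in the commit above):
 * with full PPGTT userspace never works out of the GGTT, so stale entries
 * in unbound ranges are unreachable and clears can be skipped; without it,
 * unbound ranges must be scrubbed back to scratch pages.
 */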
|
|
|
if (!HAS_FULL_PPGTT(i915))
|
2022-06-17 16:05:59 -07:00
|
|
|
ggtt->vm.clear_range = gen6_ggtt_clear_range;
|
2023-03-10 10:23:49 +01:00
|
|
|
ggtt->vm.scratch_range = gen6_ggtt_clear_range;
|
2022-06-17 16:05:59 -07:00
|
|
|
ggtt->vm.insert_page = gen6_ggtt_insert_page;
|
|
|
|
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
|
2025-03-13 16:08:32 +02:00
|
|
|
ggtt->vm.read_entry = gen6_ggtt_read_entry;
|
2022-06-17 16:05:59 -07:00
|
|
|
ggtt->vm.cleanup = gen6_gmch_remove;
|
|
|
|
|
|
|
|
ggtt->invalidate = gen6_ggtt_invalidate;
|
|
|
|
|
|
|
|
if (HAS_EDRAM(i915))
|
|
|
|
ggtt->vm.pte_encode = iris_pte_encode;
|
|
|
|
else if (IS_HASWELL(i915))
|
|
|
|
ggtt->vm.pte_encode = hsw_pte_encode;
|
|
|
|
else if (IS_VALLEYVIEW(i915))
|
|
|
|
ggtt->vm.pte_encode = byt_pte_encode;
|
|
|
|
else if (GRAPHICS_VER(i915) >= 7)
|
|
|
|
ggtt->vm.pte_encode = ivb_pte_encode;
|
|
|
|
else
|
|
|
|
ggtt->vm.pte_encode = snb_pte_encode;
|
|
|
|
|
2025-03-13 16:08:32 +02:00
|
|
|
ggtt->vm.pte_decode = gen6_pte_decode;
|
|
|
|
|
2022-06-17 16:05:59 -07:00
|
|
|
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
|
|
|
|
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
|
|
|
|
|
|
|
|
return ggtt_probe_common(ggtt, size);
|
|
|
|
}
|
|
|
|
|
2020-01-07 13:40:09 +00:00
|
|
|
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = gt->i915;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ggtt->vm.gt = gt;
|
|
|
|
ggtt->vm.i915 = i915;
|
2021-01-28 14:31:24 +01:00
|
|
|
ggtt->vm.dma = i915->drm.dev;
|
2021-06-01 09:46:41 +02:00
|
|
|
dma_resv_init(&ggtt->vm._resv);
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2022-06-17 16:05:59 -07:00
|
|
|
if (GRAPHICS_VER(i915) >= 8)
|
|
|
|
ret = gen8_gmch_probe(ggtt);
|
|
|
|
else if (GRAPHICS_VER(i915) >= 6)
|
|
|
|
ret = gen6_gmch_probe(ggtt);
|
2020-01-07 13:40:09 +00:00
|
|
|
else
|
2022-06-17 16:05:59 -07:00
|
|
|
ret = intel_ggtt_gmch_probe(ggtt);
|
|
|
|
|
2021-03-23 16:50:29 +01:00
|
|
|
if (ret) {
|
2021-06-01 09:46:41 +02:00
|
|
|
dma_resv_fini(&ggtt->vm._resv);
|
2020-01-07 13:40:09 +00:00
|
|
|
return ret;
|
2021-03-23 16:50:29 +01:00
|
|
|
}
|
2020-01-07 13:40:09 +00:00
|
|
|
|
|
|
|
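/*
 * Offsets into the GGTT are assumed to fit in 32 bits elsewhere in the
 * driver, so clamp oversized configurations to 4 GiB.
 */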
if ((ggtt->vm.total - 1) >> 32) {
|
drm/i915/ggtt: convert to drm_device based logging macros.
Converts various instances of the printk based drm logging macros to use
the struct drm_device based logging macros in i915/gt/intel_ggtt.c.
This change was done using the following coccinelle script that matches
based on the existence of a drm_i915_private device:
@@
identifier fn, T;
@@
fn(...) {
...
struct drm_i915_private *T = ...;
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
@@
identifier fn, T;
@@
fn(...,struct drm_i915_private *T,...) {
<+...
(
-DRM_INFO(
+drm_info(&T->drm,
...)
|
-DRM_ERROR(
+drm_err(&T->drm,
...)
|
-DRM_WARN(
+drm_warn(&T->drm,
...)
|
-DRM_DEBUG_DRIVER(
+drm_dbg(&T->drm,
...)
|
-DRM_DEBUG_KMS(
+drm_dbg_kms(&T->drm,
...)
|
-DRM_DEBUG_ATOMIC(
+drm_dbg_atomic(&T->drm,
...)
)
...+>
}
New checkpatch warnings were fixed manually.
Note that this converts DRM_DEBUG_DRIVER to drm_dbg()
References: https://lists.freedesktop.org/archives/dri-devel/2020-January/253381.html
Signed-off-by: Wambui Karuga <wambui.karugax@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200314183344.17603-2-wambui.karugax@gmail.com
2020-03-14 21:33:38 +03:00
|
|
|
drm_err(&i915->drm,
|
|
|
|
"We never expected a Global GTT with more than 32bits"
|
|
|
|
" of address space! Found %lldM!\n",
|
|
|
|
ggtt->vm.total >> 20);
|
2020-01-07 13:40:09 +00:00
|
|
|
ggtt->vm.total = 1ULL << 32;
|
|
|
|
ggtt->mappable_end =
|
|
|
|
min_t(u64, ggtt->mappable_end, ggtt->vm.total);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ggtt->mappable_end > ggtt->vm.total) {
|
2020-03-14 21:33:38 +03:00
|
|
|
drm_err(&i915->drm,
|
|
|
|
"mappable aperture extends past end of GGTT,"
|
|
|
|
" aperture=%pa, total=%llx\n",
|
|
|
|
&ggtt->mappable_end, ggtt->vm.total);
|
2020-01-07 13:40:09 +00:00
|
|
|
ggtt->mappable_end = ggtt->vm.total;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* GMADR is the PCI mmio aperture into the global GTT. */
|
2020-03-14 21:33:38 +03:00
|
|
|
drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
|
|
|
|
drm_dbg(&i915->drm, "GMADR size = %lluM\n",
|
|
|
|
(u64)ggtt->mappable_end >> 20);
|
|
|
|
drm_dbg(&i915->drm, "DSM size = %lluM\n",
|
|
|
|
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
|
2020-01-07 13:40:09 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i915_ggtt_probe_hw - Probe GGTT hardware location
|
|
|
|
* @i915: i915 device
|
|
|
|
*/
|
|
|
|
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
|
|
|
|
{
|
2022-11-22 12:31:26 +05:30
|
|
|
struct intel_gt *gt;
|
|
|
|
int ret, i;
|
|
|
|
|
|
|
|
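/*
 * Every GT needs its GGTT pointer set before probing; the media GT
 * shares the primary GT's GGTT rather than owning one of its own.
 */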
for_each_gt(gt, i915, i) {
|
|
|
|
ret = intel_gt_assign_ggtt(gt);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2021-12-21 21:59:46 +02:00
|
|
|
ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
|
2020-01-07 13:40:09 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2022-03-29 10:02:04 +01:00
|
|
|
if (i915_vtd_active(i915))
|
2020-04-02 14:48:18 +03:00
|
|
|
drm_info(&i915->drm, "VT-d active for gfx access\n");
|
2020-01-07 13:40:09 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-11-22 12:31:26 +05:30
|
|
|
struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915)
|
2020-01-07 13:40:09 +00:00
|
|
|
{
|
2022-11-22 12:31:26 +05:30
|
|
|
struct i915_ggtt *ggtt;
|
2022-06-17 16:05:59 -07:00
|
|
|
|
2022-11-22 12:31:26 +05:30
|
|
|
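/*
 * drm-managed allocation: the ggtt is freed automatically when the
 * drm_device is released, so no explicit cleanup path is needed.
 */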
ggtt = drmm_kzalloc(&i915->drm, sizeof(*ggtt), GFP_KERNEL);
|
|
|
|
if (!ggtt)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2022-11-22 12:31:26 +05:30
|
|
|
INIT_LIST_HEAD(&ggtt->gt_list);
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2022-11-22 12:31:26 +05:30
|
|
|
return ggtt;
|
2020-01-07 13:40:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
|
|
|
|
{
|
2022-06-17 16:05:59 -07:00
|
|
|
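/*
 * Pre-gen6 GGTTs live behind the GMCH and are enabled through the
 * legacy intel-gtt code; gen6+ parts need no extra enabling here.
 */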
if (GRAPHICS_VER(i915) < 6)
|
|
|
|
return intel_ggtt_gmch_enable_hw(i915);
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2022-06-17 16:05:59 -07:00
|
|
|
return 0;
|
2020-01-07 13:40:09 +00:00
|
|
|
}
|
|
|
|
|
2021-11-01 20:35:50 +02:00
|
|
|
/**
|
|
|
|
* i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
|
|
|
|
* @vm: The VM to restore the mappings for
|
2024-11-27 08:11:16 +02:00
|
|
|
* @all_evicted: Were all VMAs expected to be evicted on suspend?
|
2021-11-01 20:35:50 +02:00
|
|
|
*
|
|
|
|
* Restore the memory mappings for all objects mapped to HW via the GGTT or a
|
|
|
|
* DPT page table.
|
|
|
|
*
|
|
|
|
* Returns %true if the mapping was restored for any object that was in a write
|
|
|
|
* domain before suspend.
|
|
|
|
*/
|
2024-11-27 08:11:16 +02:00
|
|
|
bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted)
|
2020-01-07 13:40:09 +00:00
|
|
|
{
|
2020-01-10 11:04:00 +00:00
|
|
|
struct i915_vma *vma;
|
2021-11-01 20:35:50 +02:00
|
|
|
bool write_domain_objs = false;
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2021-11-01 20:35:50 +02:00
|
|
|
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2024-11-27 08:11:16 +02:00
|
|
|
if (all_evicted) {
|
|
|
|
drm_WARN_ON(&vm->i915->drm, !list_empty(&vm->bound_list));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2022-12-01 00:58:05 +01:00
|
|
|
/* First fill our portion of the GTT with scratch pages */
|
|
|
|
vm->clear_range(vm, 0, vm->total);
|
2020-01-07 13:40:09 +00:00
|
|
|
|
|
|
|
/* clflush objects bound into the GGTT and rebind them. */
|
2021-11-01 20:35:50 +02:00
|
|
|
list_for_each_entry(vma, &vm->bound_list, vm_link) {
|
2020-01-07 13:40:09 +00:00
|
|
|
struct drm_i915_gem_object *obj = vma->obj;
|
2020-07-29 17:42:17 +01:00
|
|
|
unsigned int was_bound =
|
|
|
|
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
|
2020-01-07 13:40:09 +00:00
|
|
|
|
2020-07-29 17:42:17 +01:00
|
|
|
GEM_BUG_ON(!was_bound);
|
2022-12-01 00:58:05 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear the bound flags of the vma resource to allow
|
|
|
|
* ptes to be repopulated.
|
|
|
|
*/
|
|
|
|
vma->resource->bound_flags = 0;
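/*
 * bind_vma() needs a PAT index: vmas backed by a GEM object carry their
 * own, while internal vmas without an object fall back to the uncached
 * legacy level translated via i915_gem_get_pat_index().
 */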
|
|
|
|
vma->ops->bind_vma(vm, NULL, vma->resource,
|
drm/i915: use pat_index instead of cache_level
Currently the KMD is using enum i915_cache_level to set caching policy for
buffer objects. This is flaky because the PAT index which really controls
the caching behavior in PTE has far more levels than what's defined in the
enum. In addition, the PAT index is platform dependent, so translating
between i915_cache_level and PAT index is not reliable and makes the code
more complicated.
From UMD's perspective there is also a necessity to set caching policy for
performance fine tuning. It's much easier for the UMD to directly use PAT
index because the behavior of each PAT index is clearly defined in Bspec.
Having the abstracted i915_cache_level sitting in between would only cause
more ambiguity. PAT is expected to work much like MOCS already works today,
and by design userspace is expected to select the index that exactly
matches the desired behavior described in the hardware specification.
For these reasons this patch replaces i915_cache_level with PAT index. Also
note, the cache_level is not completely removed yet, because the KMD still
needs to create buffer objects with simple cache settings such as
cached, uncached, or writethrough. For kernel objects, cache_level is used
for simplicity and backward compatibility. For pre-gen12 platforms PAT can
have a 1:1 mapping to i915_cache_level, so these two are interchangeable; see
the use of LEGACY_CACHELEVEL.
One consequence of this change is that gen8_pte_encode no longer works
for gen12 platforms, because gen12 platforms have different PAT
definitions. In the meantime the mtl_pte_encode introduced specifically for
MTL becomes generic for all gen12 platforms. This patch renames the MTL
PTE encode function to gen12_pte_encode and applies it to all gen12. Even
though this change looks unrelated, separating it would temporarily
break gen12 PTE encoding, so the two are squashed into one patch.
Special note: this patch changes the way caching behavior is controlled in
the sense that some objects are left to be managed by userspace. For such
objects we need to be careful not to change the userspace settings. There
are kerneldoc and comments added around obj->cache_coherent, cache_dirty,
and how to bypass the checks via i915_gem_object_has_cache_level. For
full understanding, these changes need to be looked at together with the
two follow-up patches: one disables the {set|get}_caching ioctl's and the
other adds the set_pat extension to the GEM_CREATE uAPI.
Bspec: 63019
Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Fei Yang <fei.yang@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230509165200.1740-3-fei.yang@intel.com
2023-05-09 09:52:00 -07:00
|
|
|
obj ? obj->pat_index :
|
|
|
|
i915_gem_get_pat_index(vm->i915,
|
|
|
|
I915_CACHE_NONE),
|
2022-12-01 00:58:05 +01:00
|
|
|
was_bound);
|
|
|
|
|
2020-01-07 13:40:09 +00:00
|
|
|
if (obj) { /* only used during resume => exclusive access */
|
2021-11-01 20:35:50 +02:00
|
|
|
write_domain_objs |= fetch_and_zero(&obj->write_domain);
|
2020-01-07 13:40:09 +00:00
|
|
|
obj->read_domains |= I915_GEM_DOMAIN_GTT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-01 20:35:50 +02:00
|
|
|
return write_domain_objs;
|
|
|
|
}
|
|
|
|
|
|
|
|
void i915_ggtt_resume(struct i915_ggtt *ggtt)
|
|
|
|
{
|
2022-11-22 12:31:26 +05:30
|
|
|
struct intel_gt *gt;
|
2021-11-01 20:35:50 +02:00
|
|
|
bool flush;
|
|
|
|
|
2022-11-22 12:31:26 +05:30
|
|
|
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
|
|
|
|
intel_gt_check_and_clear_faults(gt);
|
2021-11-01 20:35:50 +02:00
|
|
|
|
2024-11-27 08:11:16 +02:00
|
|
|
flush = i915_ggtt_resume_vm(&ggtt->vm, false);
|
2021-11-01 20:35:50 +02:00
|
|
|
|
2023-03-10 10:23:50 +01:00
|
|
|
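/*
 * The reserved error-capture window has no vma and is not restored by
 * the resume path above, so point its PTEs back at scratch explicitly.
 */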
if (drm_mm_node_allocated(&ggtt->error_capture))
|
|
|
|
ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
|
|
|
|
ggtt->error_capture.size);
|
|
|
|
|
2023-05-31 16:54:09 -07:00
|
|
|
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
|
|
|
|
intel_uc_resume_mappings(>->uc);
|
|
|
|
|
2020-01-07 13:40:09 +00:00
|
|
|
ggtt->invalidate(ggtt);
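/*
 * If any restored object was still in a write domain, its data may sit
 * in CPU caches; flush before the GPU reads through the GGTT again.
 */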
|
|
|
|
|
|
|
|
if (flush)
|
|
|
|
wbinvd_on_all_cpus();
|
|
|
|
|
2020-03-16 11:38:44 +00:00
|
|
|
intel_ggtt_restore_fences(ggtt);
|
2020-01-07 13:40:09 +00:00
|
|
|
}
|