mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

The BIOS FB takeover code wants to read out the PTEs (or at least one of them) to figure out where the FB is located in memory. Currently we only do that for systems with LMEMBAR, and we've open coded the PTE decoding in the display code. Introduce a more proper abstract interface (intel_ggtt_read_entry()) for this purpose, and implement it for all platforms. Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20250313140838.29742-5-ville.syrjala@linux.intel.com Reviewed-by: Jouni Högander <jouni.hogander@intel.com>
139 lines · 3.3 KiB · C
// SPDX-License-Identifier: MIT
|
|
/*
|
|
* Copyright © 2022 Intel Corporation
|
|
*/
|
|
|
|
#include "intel_ggtt_gmch.h"
|
|
|
|
#include <drm/intel/intel-gtt.h>
|
|
|
|
#include <linux/agp_backend.h>
|
|
|
|
#include "i915_drv.h"
|
|
#include "i915_utils.h"
|
|
#include "intel_gtt.h"
|
|
#include "intel_gt_regs.h"
|
|
#include "intel_gt.h"
|
|
|
|
static void gmch_ggtt_insert_page(struct i915_address_space *vm,
|
|
dma_addr_t addr,
|
|
u64 offset,
|
|
unsigned int pat_index,
|
|
u32 unused)
|
|
{
|
|
unsigned int flags = (pat_index == I915_CACHE_NONE) ?
|
|
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
|
|
|
|
intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
|
|
}
|
|
|
|
static dma_addr_t gmch_ggtt_read_entry(struct i915_address_space *vm,
|
|
u64 offset, bool *is_present, bool *is_local)
|
|
{
|
|
return intel_gmch_gtt_read_entry(offset >> PAGE_SHIFT,
|
|
is_present, is_local);
|
|
}
|
|
|
|
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
|
|
struct i915_vma_resource *vma_res,
|
|
unsigned int pat_index,
|
|
u32 unused)
|
|
{
|
|
unsigned int flags = (pat_index == I915_CACHE_NONE) ?
|
|
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
|
|
|
|
intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
|
|
flags);
|
|
}
|
|
|
|
/* Flush GGTT updates out to the hardware via the GMCH chipset flush. */
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gmch_gtt_flush();
}
|
|
|
|
/*
 * Clear the GGTT entries covering [start, start + length). Also installed
 * as the scratch_range hook, so both operations go through the same path.
 */
static void gmch_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	u64 first_entry = start >> PAGE_SHIFT;
	u64 num_entries = length >> PAGE_SHIFT;

	intel_gmch_gtt_clear_range(first_entry, num_entries);
}
|
|
|
|
/* Teardown hook: hand the GTT back to the shared GMCH/intel-gtt driver. */
static void gmch_ggtt_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}
|
|
|
|
/*
|
|
* Certain Gen5 chipsets require idling the GPU before unmapping anything from
|
|
* the GTT when VT-d is enabled.
|
|
*/
|
|
static bool needs_idle_maps(struct drm_i915_private *i915)
|
|
{
|
|
/*
|
|
* Query intel_iommu to see if we need the workaround. Presumably that
|
|
* was loaded first.
|
|
*/
|
|
if (!i915_vtd_active(i915))
|
|
return false;
|
|
|
|
if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
|
|
return true;
|
|
|
|
return false;
|
|
}
|
|
|
|
/*
 * intel_ggtt_gmch_probe - initialize the GGTT on GMCH-based platforms
 * @ggtt: the GGTT to set up
 *
 * Probes the GMCH through the shared intel-gtt driver, fills in the GGTT
 * size and mappable aperture, and wires the address-space vfuncs up to the
 * GMCH-backed implementations in this file.
 *
 * Returns: 0 on success, -EIO if the gmch could not be probed.
 */
int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	/* Note: intel_gmch_probe() returns non-zero on success. */
	ret = intel_gmch_probe(i915->gmch.pdev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	/* Total GGTT size, aperture base and mappable size from the GMCH. */
	intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr = DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	/* Ironlake + VT-d workaround: idle the GPU before GTT unmaps. */
	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	/* GMCH-backed implementations of the GGTT vfuncs. */
	ggtt->vm.insert_page = gmch_ggtt_insert_page;
	ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
	ggtt->vm.clear_range = gmch_ggtt_clear_range;
	ggtt->vm.scratch_range = gmch_ggtt_clear_range;
	ggtt->vm.read_entry = gmch_ggtt_read_entry;
	ggtt->vm.cleanup = gmch_ggtt_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}
|
|
|
|
int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
|
|
{
|
|
if (!intel_gmch_enable_gtt())
|
|
return -EIO;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* Public wrapper: flush GGTT writes via the GMCH chipset flush. */
void intel_ggtt_gmch_flush(void)
{
	intel_gmch_gtt_flush();
}
|