mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

- The 4 patch series "mm: ksm: prevent KSM from breaking merging of new VMAs" from Lorenzo Stoakes addresses an issue with KSM's PR_SET_MEMORY_MERGE mode: newly mapped VMAs were not eligible for merging with existing adjacent VMAs. (A userspace prctl() sketch follows this group of entries.)
- The 4 patch series "mm/damon: introduce DAMON_STAT for simple and practical access monitoring" from SeongJae Park adds a new kernel module which simplifies the setup and usage of DAMON in production environments.
- The 6 patch series "stop passing a writeback_control to swap/shmem writeout" from Christoph Hellwig is a cleanup to the writeback code which removes a couple of pointers from struct writeback_control.
- The 7 patch series "drivers/base/node.c: optimization and cleanups" from Donet Tom contains largely uncorrelated cleanups to the NUMA node setup and management code.
- The 4 patch series "mm: userfaultfd: assorted fixes and cleanups" from Tal Zussman does some maintenance work on the userfaultfd code.
- The 5 patch series "Readahead tweaks for larger folios" from Ryan Roberts implements some tuneups for pagecache readahead when it is reading into order>0 folios.
- The 4 patch series "selftests/mm: Tweaks to the cow test" from Mark Brown provides some cleanups and consistency improvements to the selftests code.
- The 4 patch series "Optimize mremap() for large folios" from Dev Jain does that. A 37% reduction in execution time was measured in a memset+mremap+munmap microbenchmark.
- The 5 patch series "Remove zero_user()" from Matthew Wilcox expunges zero_user() in favor of the more modern memzero_page().
- The 3 patch series "mm/huge_memory: vmf_insert_folio_*() and vmf_insert_pfn_pud() fixes" from David Hildenbrand addresses some warts which David noticed in the huge page code. These were not known to be causing any issues at this time.
- The 3 patch series "mm/damon: use alloc_migrate_target() for DAMOS_MIGRATE_{HOT,COLD}" from SeongJae Park provides some cleanup and consolidation work in DAMON.
- The 3 patch series "use vm_flags_t consistently" from Lorenzo Stoakes uses vm_flags_t in places where we were inappropriately using other types.
- The 3 patch series "mm/memfd: Reserve hugetlb folios before allocation" from Vivek Kasireddy increases the reliability of large page allocation in the memfd code.
- The 14 patch series "mm: Remove pXX_devmap page table bit and pfn_t type" from Alistair Popple removes several now-unneeded PFN_* flags.
- The 5 patch series "mm/damon: decouple sysfs from core" from SeongJae Park implements some cleanup and maintainability work in the DAMON sysfs layer.
- The 5 patch series "madvise cleanup" from Lorenzo Stoakes does quite a lot of cleanup/maintenance work in the madvise() code.
- The 4 patch series "madvise anon_name cleanups" from Vlastimil Babka provides additional cleanups on top of Lorenzo's effort.
- The 11 patch series "Implement numa node notifier" from Oscar Salvador creates a standalone notifier for NUMA node memory state changes. Previously these were lumped under the more general memory on/offline notifier.
- The 6 patch series "Make MIGRATE_ISOLATE a standalone bit" from Zi Yan cleans up the pageblock isolation code and fixes a potential issue which doesn't seem to cause any problems in practice.
- The 5 patch series "selftests/damon: add python and drgn based DAMON sysfs functionality tests" from SeongJae Park adds additional drgn- and python-based DAMON selftests which are more comprehensive than the existing selftest suite.
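
A minimal userspace sketch of the PR_SET_MEMORY_MERGE mode that the KSM series above fixes. This is illustrative only and not code from the series; the fallback define is an assumption for pre-6.4 userspace headers.

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67	/* process-wide KSM opt-in, Linux 6.4+ */
#endif

int main(void)
{
	/* Opt every compatible anonymous VMA of this process into KSM. */
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("prctl(PR_SET_MEMORY_MERGE)");

	/*
	 * Per the series description, a VMA mapped *after* enabling the
	 * mode can now still merge with existing adjacent mergeable VMAs.
	 */
	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED;
}
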
- The 5 patch series "Misc rework on hugetlb faulting path" from Oscar Salvador fixes a rather obscure deadlock in the hugetlb fault code and follows that fix with a series of cleanups.
- The 3 patch series "cma: factor out allocation logic from __cma_declare_contiguous_nid" from Mike Rapoport rationalizes and cleans up the highmem-specific code in the CMA allocator.
- The 28 patch series "mm/migration: rework movable_ops page migration (part 1)" from David Hildenbrand provides cleanups and future-preparedness to the migration code.
- The 2 patch series "mm/damon: add trace events for auto-tuned monitoring intervals and DAMOS quota" from SeongJae Park adds some tracepoints to some DAMON auto-tuning code.
- The 6 patch series "mm/damon: fix misc bugs in DAMON modules" from SeongJae Park does that.
- The 6 patch series "mm/damon: misc cleanups" from SeongJae Park also does what it claims.
- The 4 patch series "mm: folio_pte_batch() improvements" from David Hildenbrand cleans up the large folio PTE batching code.
- The 13 patch series "mm/damon/vaddr: Allow interleaving in migrate_{hot,cold} actions" from SeongJae Park facilitates dynamic alteration of DAMON's inter-node allocation policy.
- The 3 patch series "Remove unmap_and_put_page()" from Vishal Moola provides a couple of page->folio conversions.
- The 4 patch series "mm: per-node proactive reclaim" from Davidlohr Bueso implements a per-node control of proactive reclaim - beyond the current memcg-based implementation.
- The 14 patch series "mm/damon: remove damon_callback" from SeongJae Park replaces the damon_callback interface with a more general and powerful damon_call()+damos_walk() interface.
- The 10 patch series "mm/mremap: permit mremap() move of multiple VMAs" from Lorenzo Stoakes implements a number of mremap cleanups (of course) in preparation for adding new mremap() functionality: newly permit the remapping of multiple VMAs when the user is specifying MREMAP_FIXED. It still excludes some specialized situations where this cannot be performed reliably. (A usage sketch follows this group of entries.)
- The 3 patch series "drop hugetlb_free_pgd_range()" from Anthony Yznaga switches some sparc hugetlb code over to the generic version and removes the thus-unneeded hugetlb_free_pgd_range().
- The 4 patch series "mm/damon/sysfs: support periodic and automated stats update" from SeongJae Park augments the present userspace-requested update of DAMON sysfs monitoring files. Automatic update is now provided, along with a tunable to control the update interval.
- The 4 patch series "Some randome fixes and cleanups to swapfile" from Kemeng Shi does what it claims.
- The 4 patch series "mm: introduce snapshot_page" from Luiz Capitulino and David Hildenbrand provides (and uses) a means by which debug-style functions can grab a copy of a pageframe and inspect it locklessly without tripping over the races inherent in operating on the live pageframe directly.
- The 6 patch series "use per-vma locks for /proc/pid/maps reads" from Suren Baghdasaryan addresses the large contention issues which can be triggered by reads from that procfs file. Latencies are reduced by more than half in some situations. The series also introduces several new selftests for the /proc/pid/maps interface.
- The 6 patch series "__folio_split() clean up" from Zi Yan cleans up __folio_split()!
- The 7 patch series "Optimize mprotect() for large folios" from Dev Jain provides some quite large (>3x) speedups to mprotect() when dealing with large folios.
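
A hedged sketch of the mremap() capability described above. It is illustrative only; move_span() is a hypothetical wrapper, and the old and new addresses are assumed to be suitably sized, page-aligned mappings.

#define _GNU_SOURCE
#include <sys/mman.h>

/*
 * Move @len bytes from @old to @new in one call. Per the series
 * description, a MREMAP_FIXED move may now span multiple VMAs rather
 * than requiring the source range to sit inside a single VMA (minus the
 * specialized cases the series still excludes).
 */
static void *move_span(void *old, void *new, size_t len)
{
	return mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, new);
}
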
- The 2 patch series "selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));" and some cleanup" from wang lian does some cleanup work in the selftests code.
- The 3 patch series "tools/testing: expand mremap testing" from Lorenzo Stoakes extends the mremap() selftest in several ways, including adding more checking of Lorenzo's recently added "permit mremap() move of multiple VMAs" feature.
- The 22 patch series "selftests/damon/sysfs.py: test all parameters" from SeongJae Park extends the DAMON sysfs interface selftest so that it tests all possible user-requested parameters, rather than the present minimal subset.

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaIqcCgAKCRDdBJ7gKXxA
jkVBAQCCn9DR1QP0CRk961ot0cKzOgioSc0aA03DPb2KXRt2kQEAzDAz0ARurFhL
8BzbvI0c+4tntHLXvIlrC33n9KWAOQM=
=XsFy
-----END PGP SIGNATURE-----

Merge tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM updates from Andrew Morton:
"As usual, many cleanups. The below blurbiage describes 42 patchsets. 21 of those are partially or fully cleanup work. "cleans up", "cleanup", "maintainability", "rationalizes", etc. I never knew the MM code was so dirty.
"mm: ksm: prevent KSM from breaking merging of new VMAs" (Lorenzo Stoakes) addresses an issue with KSM's PR_SET_MEMORY_MERGE mode: newly mapped VMAs were not eligible for merging with existing adjacent VMAs.
"mm/damon: introduce DAMON_STAT for simple and practical access monitoring" (SeongJae Park) adds a new kernel module which simplifies the setup and usage of DAMON in production environments.
"stop passing a writeback_control to swap/shmem writeout" (Christoph Hellwig) is a cleanup to the writeback code which removes a couple of pointers from struct writeback_control.
"drivers/base/node.c: optimization and cleanups" (Donet Tom) contains largely uncorrelated cleanups to the NUMA node setup and management code.
"mm: userfaultfd: assorted fixes and cleanups" (Tal Zussman) does some maintenance work on the userfaultfd code.
"Readahead tweaks for larger folios" (Ryan Roberts) implements some tuneups for pagecache readahead when it is reading into order>0 folios.
"selftests/mm: Tweaks to the cow test" (Mark Brown) provides some cleanups and consistency improvements to the selftests code.
"Optimize mremap() for large folios" (Dev Jain) does that. A 37% reduction in execution time was measured in a memset+mremap+munmap microbenchmark.
"Remove zero_user()" (Matthew Wilcox) expunges zero_user() in favor of the more modern memzero_page(). (A conversion sketch follows this group of entries.)
"mm/huge_memory: vmf_insert_folio_*() and vmf_insert_pfn_pud() fixes" (David Hildenbrand) addresses some warts which David noticed in the huge page code. These were not known to be causing any issues at this time.
"mm/damon: use alloc_migrate_target() for DAMOS_MIGRATE_{HOT,COLD}" (SeongJae Park) provides some cleanup and consolidation work in DAMON.
"use vm_flags_t consistently" (Lorenzo Stoakes) uses vm_flags_t in places where we were inappropriately using other types.
"mm/memfd: Reserve hugetlb folios before allocation" (Vivek Kasireddy) increases the reliability of large page allocation in the memfd code.
"mm: Remove pXX_devmap page table bit and pfn_t type" (Alistair Popple) removes several now-unneeded PFN_* flags.
"mm/damon: decouple sysfs from core" (SeongJae Park) implements some cleanup and maintainability work in the DAMON sysfs layer.
"madvise cleanup" (Lorenzo Stoakes) does quite a lot of cleanup/maintenance work in the madvise() code.
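
A minimal sketch of the conversion pattern behind the "Remove zero_user()" series noted above. Illustrative only; clear_tail() is a hypothetical caller, not code from the series.

#include <linux/highmem.h>

/* Zero @len bytes of @page starting at @offset through a temporary mapping. */
static void clear_tail(struct page *page, size_t offset, size_t len)
{
	/* previously: zero_user(page, offset, len); */
	memzero_page(page, offset, len);
}
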
"madvise anon_name cleanups" (Vlastimil Babka) provides additional cleanups on top or Lorenzo's effort. "Implement numa node notifier" (Oscar Salvador) creates a standalone notifier for NUMA node memory state changes. Previously these were lumped under the more general memory on/offline notifier. "Make MIGRATE_ISOLATE a standalone bit" (Zi Yan) cleans up the pageblock isolation code and fixes a potential issue which doesn't seem to cause any problems in practice. "selftests/damon: add python and drgn based DAMON sysfs functionality tests" (SeongJae Park) adds additional drgn- and python-based DAMON selftests which are more comprehensive than the existing selftest suite. "Misc rework on hugetlb faulting path" (Oscar Salvador) fixes a rather obscure deadlock in the hugetlb fault code and follows that fix with a series of cleanups. "cma: factor out allocation logic from __cma_declare_contiguous_nid" (Mike Rapoport) rationalizes and cleans up the highmem-specific code in the CMA allocator. "mm/migration: rework movable_ops page migration (part 1)" (David Hildenbrand) provides cleanups and future-preparedness to the migration code. "mm/damon: add trace events for auto-tuned monitoring intervals and DAMOS quota" (SeongJae Park) adds some tracepoints to some DAMON auto-tuning code. "mm/damon: fix misc bugs in DAMON modules" (SeongJae Park) does that. "mm/damon: misc cleanups" (SeongJae Park) also does what it claims. "mm: folio_pte_batch() improvements" (David Hildenbrand) cleans up the large folio PTE batching code. "mm/damon/vaddr: Allow interleaving in migrate_{hot,cold} actions" (SeongJae Park) facilitates dynamic alteration of DAMON's inter-node allocation policy. "Remove unmap_and_put_page()" (Vishal Moola) provides a couple of page->folio conversions. "mm: per-node proactive reclaim" (Davidlohr Bueso) implements a per-node control of proactive reclaim - beyond the current memcg-based implementation. "mm/damon: remove damon_callback" (SeongJae Park) replaces the damon_callback interface with a more general and powerful damon_call()+damos_walk() interface. "mm/mremap: permit mremap() move of multiple VMAs" (Lorenzo Stoakes) implements a number of mremap cleanups (of course) in preparation for adding new mremap() functionality: newly permit the remapping of multiple VMAs when the user is specifying MREMAP_FIXED. It still excludes some specialized situations where this cannot be performed reliably. "drop hugetlb_free_pgd_range()" (Anthony Yznaga) switches some sparc hugetlb code over to the generic version and removes the thus-unneeded hugetlb_free_pgd_range(). "mm/damon/sysfs: support periodic and automated stats update" (SeongJae Park) augments the present userspace-requested update of DAMON sysfs monitoring files. Automatic update is now provided, along with a tunable to control the update interval. "Some randome fixes and cleanups to swapfile" (Kemeng Shi) does what is claims. "mm: introduce snapshot_page" (Luiz Capitulino and David Hildenbrand) provides (and uses) a means by which debug-style functions can grab a copy of a pageframe and inspect it locklessly without tripping over the races inherent in operating on the live pageframe directly. "use per-vma locks for /proc/pid/maps reads" (Suren Baghdasaryan) addresses the large contention issues which can be triggered by reads from that procfs file. Latencies are reduced by more than half in some situations. The series also introduces several new selftests for the /proc/pid/maps interface. 
"__folio_split() clean up" (Zi Yan) cleans up __folio_split()! "Optimize mprotect() for large folios" (Dev Jain) provides some quite large (>3x) speedups to mprotect() when dealing with large folios. "selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));" and some cleanup" (wang lian) does some cleanup work in the selftests code. "tools/testing: expand mremap testing" (Lorenzo Stoakes) extends the mremap() selftest in several ways, including adding more checking of Lorenzo's recently added "permit mremap() move of multiple VMAs" feature. "selftests/damon/sysfs.py: test all parameters" (SeongJae Park) extends the DAMON sysfs interface selftest so that it tests all possible user-requested parameters. Rather than the present minimal subset" * tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (370 commits) MAINTAINERS: add missing headers to mempory policy & migration section MAINTAINERS: add missing file to cgroup section MAINTAINERS: add MM MISC section, add missing files to MISC and CORE MAINTAINERS: add missing zsmalloc file MAINTAINERS: add missing files to page alloc section MAINTAINERS: add missing shrinker files MAINTAINERS: move memremap.[ch] to hotplug section MAINTAINERS: add missing mm_slot.h file THP section MAINTAINERS: add missing interval_tree.c to memory mapping section MAINTAINERS: add missing percpu-internal.h file to per-cpu section mm/page_alloc: remove trace_mm_alloc_contig_migrate_range_info() selftests/damon: introduce _common.sh to host shared function selftests/damon/sysfs.py: test runtime reduction of DAMON parameters selftests/damon/sysfs.py: test non-default parameters runtime commit selftests/damon/sysfs.py: generalize DAMON context commit assertion selftests/damon/sysfs.py: generalize monitoring attributes commit assertion selftests/damon/sysfs.py: generalize DAMOS schemes commit assertion selftests/damon/sysfs.py: test DAMOS filters commitment selftests/damon/sysfs.py: generalize DAMOS scheme commit assertion selftests/damon/sysfs.py: test DAMOS destinations commitment ...
662 lines
17 KiB
C
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move folios to appropriate lru and release the batch, decrementing the
 * ref count of those folios.
 */
static void check_release_folio_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct folio_batch fbatch;
	struct folio *last = NULL;
	struct page *page;

	mapping_clear_unevictable(mapping);

	folio_batch_init(&fbatch);
	for_each_sgt_page(page, sgt_iter, st) {
		struct folio *folio = page_folio(page);

		if (folio == last)
			continue;
		last = folio;
		if (dirty)
			folio_mark_dirty(folio);
		if (backup)
			folio_mark_accessed(folio);

		if (!folio_batch_add(&fbatch, folio))
			check_release_folio_batch(&fbatch);
	}
	if (fbatch.nr)
		check_release_folio_batch(&fbatch);

	sg_free_table(st);
}

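/*
 * Build a scatterlist of shmem folios backing an object of @size bytes.
 * Folios are read from @mapping with reclaim initially disabled; if that
 * fails we reap our own buffers via i915_gem_shrink() and finally retry
 * with full reclaim allowed. Physically contiguous folios are coalesced
 * into a single scatterlist entry, capped at roughly @max_segment bytes.
 */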
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	unsigned int page_count; /* restricted by sg_alloc_table */
	unsigned long i;
	struct scatterlist *sg;
	unsigned long next_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	if (overflows_type(size / PAGE_SIZE, page_count))
		return -E2BIG;

	page_count = size / PAGE_SIZE;
	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		struct folio *folio;
		unsigned long nr_pages;
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			folio = shmem_read_folio_gfp(mapping, i, gfp);
			if (!IS_ERR(folio))
				break;

			if (!*s) {
				ret = PTR_ERR(folio);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
			}
		} while (1);

		nr_pages = min_t(unsigned long,
				 folio_nr_pages(folio), page_count - i);
		if (!i ||
		    sg->length >= max_segment ||
		    folio_pfn(folio) != next_pfn) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
		} else {
			/* XXX: could overflow? */
			sg->length += nr_pages * PAGE_SIZE;
		}
		next_pfn = folio_pfn(folio) + nr_pages;
		i += nr_pages - 1;

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

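/*
 * get_pages backend for shmem objects: allocate the backing folios, build
 * the sg_table and DMA-map it. If DMA remapping fails with large segments,
 * retry the whole thing with PAGE_SIZE segments before giving up.
 */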
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
	struct sg_table *st;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			shmem_sg_free_table(st, mapping, false, false);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %zu pages\n",
				 obj->base.size >> PAGE_SHIFT);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct folio *folio = NULL;
	int error = 0;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those folios to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */
	while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
		if (folio_mapped(folio))
			folio_redirty_for_writepage(&wbc, folio);
		else
			error = shmem_writeout(folio, NULL, NULL);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

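/*
 * Shrinker callback: objects marked I915_MADV_DONTNEED are truncated
 * outright, already-purged objects are ignored, and, when requested via
 * I915_GEM_OBJECT_SHRINK_WRITEBACK, the remaining dirty pages are pushed
 * towards swap via writeback.
 */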
static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		shmem_writeback(obj);

	return 0;
}

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC igfx platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915) && !IS_DGFX(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	struct file *file = obj->base.filp;
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t written;
	u64 size = arg->size;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (size > MAX_RW_COUNT)
		return -EFBIG;

	if (!file->f_op->write_iter)
		return -EINVAL;

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = arg->offset;
	iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)user_data, size);

	written = file->f_op->write_iter(&kiocb, &iter);
	BUG_ON(written == -EIOCBQUEUED);

	if (written != size)
		return -EIO;

	if (written < 0)
		return written;

	return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.shrink = shmem_shrink,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	/* XXX: The __shmem_file_setup() function returns -EINVAL if size is
	 * greater than MAX_LFS_FILESIZE.
	 * To handle the same error as other code that returns -E2BIG when
	 * the size is too large, we add code that returns -E2BIG when the
	 * size is larger than the size that can be handled.
	 * If BITS_PER_LONG is 32, size > MAX_LFS_FILESIZE is always false,
	 * so we only need to check when BITS_PER_LONG is 64.
	 * If BITS_PER_LONG is 32, E2BIG checks are processed when
	 * i915_gem_object_size_2big() is called before init_object() callback
	 * is called.
	 */
	if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
		return -E2BIG;

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

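/*
 * Initialise a shmem-backed GEM object: create the backing shmemfs file,
 * restrict its GFP mask to the 32-bit DMA zone on i965g/gm (which cannot
 * relocate objects above 4GiB), and pick the default cache level (LLC on
 * LLC platforms and on MTL and later, uncached otherwise).
 */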
static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t offset,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	/*
	 * MTL doesn't snoop CPU cache by default for GPU access (namely
	 * 1-way coherency). However, some UMDs are currently depending on
	 * that. Make 1-way coherent the default setting for MTL. A follow
	 * up patch will extend the GEM_CREATE uAPI to allow UMDs to specify
	 * the caching mode at BO creation time.
	 */
	if (HAS_LLC(i915) || (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	loff_t pos = 0;
	ssize_t err;

	GEM_WARN_ON(IS_DGFX(i915));
	obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	err = kernel_write(file, data, size, &pos);

	if (err < 0)
		goto fail;

	if (err != size) {
		err = -EIO;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_init(mem->i915);
	intel_memory_region_set_name(mem, "system");

	return 0; /* We fall back to the kernel mnt if gemfs init failed. */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

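/*
 * Register the "system" memory region backed by shmem; its reported size
 * is the total amount of system RAM (totalram_pages()).
 */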
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}