linux/drivers/gpu/drm/msm/msm_gem.c
Linus Torvalds beace86e61

Merge tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM updates from Andrew Morton:
 "As usual, many cleanups. The below blurbiage describes 42 patchsets.
  21 of those are partially or fully cleanup work. "cleans up",
  "cleanup", "maintainability", "rationalizes", etc.

  I never knew the MM code was so dirty.

  "mm: ksm: prevent KSM from breaking merging of new VMAs" (Lorenzo Stoakes)
     addresses an issue with KSM's PR_SET_MEMORY_MERGE mode: newly
     mapped VMAs were not eligible for merging with existing adjacent
     VMAs.

  "mm/damon: introduce DAMON_STAT for simple and practical access monitoring" (SeongJae Park)
     adds a new kernel module which simplifies the setup and usage of
     DAMON in production environments.

  "stop passing a writeback_control to swap/shmem writeout" (Christoph Hellwig)
     is a cleanup to the writeback code which removes a couple of
     pointers from struct writeback_control.

  "drivers/base/node.c: optimization and cleanups" (Donet Tom)
     contains largely uncorrelated cleanups to the NUMA node setup and
     management code.

  "mm: userfaultfd: assorted fixes and cleanups" (Tal Zussman)
     does some maintenance work on the userfaultfd code.

  "Readahead tweaks for larger folios" (Ryan Roberts)
     implements some tuneups for pagecache readahead when it is reading
     into order>0 folios.

  "selftests/mm: Tweaks to the cow test" (Mark Brown)
     provides some cleanups and consistency improvements to the
     selftests code.

  "Optimize mremap() for large folios" (Dev Jain)
     does that. A 37% reduction in execution time was measured in a
     memset+mremap+munmap microbenchmark.

  "Remove zero_user()" (Matthew Wilcox)
     expunges zero_user() in favor of the more modern memzero_page().
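
     The conversion is mechanical; a sketch of the pattern (both
     helpers take the same page/offset/length and live in
     <linux/highmem.h>):

        - zero_user(page, offset, len);
        + memzero_page(page, offset, len);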

  "mm/huge_memory: vmf_insert_folio_*() and vmf_insert_pfn_pud() fixes" (David Hildenbrand)
     addresses some warts which David noticed in the huge page code.
     These were not known to be causing any issues at this time.

  "mm/damon: use alloc_migrate_target() for DAMOS_MIGRATE_{HOT,COLD" (SeongJae Park)
     provides some cleanup and consolidation work in DAMON.

  "use vm_flags_t consistently" (Lorenzo Stoakes)
     uses vm_flags_t in places where we were inappropriately using other
     types.

  "mm/memfd: Reserve hugetlb folios before allocation" (Vivek Kasireddy)
     increases the reliability of large page allocation in the memfd
     code.

  "mm: Remove pXX_devmap page table bit and pfn_t type" (Alistair Popple)
     removes several now-unneeded PFN_* flags.

  "mm/damon: decouple sysfs from core" (SeongJae Park)
     implements some cleanup and maintainability work in the DAMON
     sysfs layer.

  "madvise cleanup" (Lorenzo Stoakes)
     does quite a lot of cleanup/maintenance work in the madvise() code.

  "madvise anon_name cleanups" (Vlastimil Babka)
     provides additional cleanups on top of Lorenzo's effort.

  "Implement numa node notifier" (Oscar Salvador)
     creates a standalone notifier for NUMA node memory state changes.
     Previously these were lumped under the more general memory
     on/offline notifier.

  "Make MIGRATE_ISOLATE a standalone bit" (Zi Yan)
     cleans up the pageblock isolation code and fixes a potential issue
     which doesn't seem to cause any problems in practice.

  "selftests/damon: add python and drgn based DAMON sysfs functionality tests" (SeongJae Park)
     adds additional drgn- and python-based DAMON selftests which are
     more comprehensive than the existing selftest suite.

  "Misc rework on hugetlb faulting path" (Oscar Salvador)
     fixes a rather obscure deadlock in the hugetlb fault code and
     follows that fix with a series of cleanups.

  "cma: factor out allocation logic from __cma_declare_contiguous_nid" (Mike Rapoport)
     rationalizes and cleans up the highmem-specific code in the CMA
     allocator.

  "mm/migration: rework movable_ops page migration (part 1)" (David Hildenbrand)
     provides cleanups and future-preparedness to the migration code.

  "mm/damon: add trace events for auto-tuned monitoring intervals and DAMOS quota" (SeongJae Park)
     adds some tracepoints to some DAMON auto-tuning code.

  "mm/damon: fix misc bugs in DAMON modules" (SeongJae Park)
     does that.

  "mm/damon: misc cleanups" (SeongJae Park)
     also does what it claims.

  "mm: folio_pte_batch() improvements" (David Hildenbrand)
     cleans up the large folio PTE batching code.

  "mm/damon/vaddr: Allow interleaving in migrate_{hot,cold} actions" (SeongJae Park)
     facilitates dynamic alteration of DAMON's inter-node allocation
     policy.

  "Remove unmap_and_put_page()" (Vishal Moola)
     provides a couple of page->folio conversions.

  "mm: per-node proactive reclaim" (Davidlohr Bueso)
     implements a per-node control of proactive reclaim - beyond the
     current memcg-based implementation.

  "mm/damon: remove damon_callback" (SeongJae Park)
     replaces the damon_callback interface with a more general and
     powerful damon_call()+damos_walk() interface.

  "mm/mremap: permit mremap() move of multiple VMAs" (Lorenzo Stoakes)
     implements a number of mremap cleanups (of course) in preparation
     for adding new mremap() functionality: newly permit the remapping
     of multiple VMAs when the user is specifying MREMAP_FIXED. It still
     excludes some specialized situations where this cannot be performed
     reliably.
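
     A userspace sketch of the newly permitted case (addresses and
     sizes are illustrative; previously the source range had to lie
     within a single VMA):

        #define _GNU_SOURCE
        #include <sys/mman.h>

        /* move an 8MiB range, which may now span several VMAs */
        void *p = mremap(old_addr, 8UL << 20, 8UL << 20,
                         MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
        if (p == MAP_FAILED)
                perror("mremap");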

  "drop hugetlb_free_pgd_range()" (Anthony Yznaga)
     switches some sparc hugetlb code over to the generic version and
     removes the thus-unneeded hugetlb_free_pgd_range().

  "mm/damon/sysfs: support periodic and automated stats update" (SeongJae Park)
     augments the present userspace-requested update of DAMON sysfs
     monitoring files. Automatic update is now provided, along with a
     tunable to control the update interval.

  "Some randome fixes and cleanups to swapfile" (Kemeng Shi)
     does what is claims.

  "mm: introduce snapshot_page" (Luiz Capitulino and David Hildenbrand)
     provides (and uses) a means by which debug-style functions can grab
     a copy of a pageframe and inspect it locklessly without tripping
     over the races inherent in operating on the live pageframe
     directly.

  "use per-vma locks for /proc/pid/maps reads" (Suren Baghdasaryan)
     addresses the large contention issues which can be triggered by
     reads from that procfs file. Latencies are reduced by more than
     half in some situations. The series also introduces several new
     selftests for the /proc/pid/maps interface.

  "__folio_split() clean up" (Zi Yan)
     cleans up __folio_split()!

  "Optimize mprotect() for large folios" (Dev Jain)
     provides some quite large (>3x) speedups to mprotect() when dealing
     with large folios.

  "selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));" and some cleanup" (wang lian)
     does some cleanup work in the selftests code.
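
     The idiom being consolidated defeats compiler elision of a read;
     roughly (FORCE_READ being the selftests helper):

        /* keep the compiler from optimizing away a read of x */
        #define FORCE_READ(x) asm volatile("" : "+r" (x))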

  "tools/testing: expand mremap testing" (Lorenzo Stoakes)
     extends the mremap() selftest in several ways, including adding
     more checking of Lorenzo's recently added "permit mremap() move of
     multiple VMAs" feature.

  "selftests/damon/sysfs.py: test all parameters" (SeongJae Park)
     extends the DAMON sysfs interface selftest so that it tests all
     possible user-requested parameters, rather than the present minimal
     subset"

* tag 'mm-stable-2025-07-30-15-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (370 commits)
  MAINTAINERS: add missing headers to memory policy & migration section
  MAINTAINERS: add missing file to cgroup section
  MAINTAINERS: add MM MISC section, add missing files to MISC and CORE
  MAINTAINERS: add missing zsmalloc file
  MAINTAINERS: add missing files to page alloc section
  MAINTAINERS: add missing shrinker files
  MAINTAINERS: move memremap.[ch] to hotplug section
  MAINTAINERS: add missing mm_slot.h file THP section
  MAINTAINERS: add missing interval_tree.c to memory mapping section
  MAINTAINERS: add missing percpu-internal.h file to per-cpu section
  mm/page_alloc: remove trace_mm_alloc_contig_migrate_range_info()
  selftests/damon: introduce _common.sh to host shared function
  selftests/damon/sysfs.py: test runtime reduction of DAMON parameters
  selftests/damon/sysfs.py: test non-default parameters runtime commit
  selftests/damon/sysfs.py: generalize DAMON context commit assertion
  selftests/damon/sysfs.py: generalize monitoring attributes commit assertion
  selftests/damon/sysfs.py: generalize DAMOS schemes commit assertion
  selftests/damon/sysfs.py: test DAMOS filters commitment
  selftests/damon/sysfs.py: generalize DAMOS scheme commit assertion
  selftests/damon/sysfs.py: test DAMOS destinations commitment
  ...
2025-07-31 14:57:54 -07:00


// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drm_prime.h>
#include <drm/drm_file.h>
#include <trace/events/gpu_mem.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
{
uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
trace_gpu_mem_total(0, 0, total_mem);
}
static void update_ctx_mem(struct drm_file *file, ssize_t size)
{
struct msm_context *ctx = file->driver_priv;
uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
rcu_read_lock(); /* Locks file->pid! */
trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
rcu_read_unlock();
}
static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{
msm_gem_vma_get(obj);
update_ctx_mem(file, obj->size);
return 0;
}
static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
bool close, const char *reason);
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct msm_context *ctx = file->driver_priv;
struct drm_exec exec;
update_ctx_mem(file, -obj->size);
msm_gem_vma_put(obj);
/*
* If VM isn't created yet, nothing to cleanup. And in fact calling
* put_iova_spaces() with vm=NULL would be bad, in that it will tear-
* down the mappings of shared buffers in other contexts.
*/
if (!ctx->vm)
return;
/*
* VM_BIND does not depend on implicit teardown of VMAs on handle
* close, but instead on implicit teardown of the VM when the device
* is closed (see msm_gem_vm_close())
*/
if (msm_context_is_vmbind(ctx))
return;
/*
* TODO we might need to kick this to a queue to avoid blocking
* in CLOSE ioctl
*/
dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
put_iova_spaces(obj, ctx->vm, true, "close");
drm_exec_fini(&exec); /* drop locks */
}
/*
* Get/put for kms->vm VMA
*/
void msm_gem_vma_get(struct drm_gem_object *obj)
{
atomic_inc(&to_msm_bo(obj)->vma_ref);
}
void msm_gem_vma_put(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct drm_exec exec;
if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
return;
if (!priv->kms)
return;
msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
drm_exec_fini(&exec); /* drop locks */
}
/*
* Cache sync.. this is a bit over-complicated, to fit dma-mapping
* API. Really GPU cache is out of scope here (handled on cmdstream)
* and all we need to do is invalidate newly allocated pages before
* mapping to CPU as uncached/writecombine.
*
* On top of this, we have the added headache, that depending on
* display generation, the display's iommu may be wired up to either
* the toplevel drm device (mdss), or to the mdp sub-node, meaning
* that here we either have dma-direct or iommu ops.
*
* Let this be a cautionary tale of abstraction gone wrong.
*/
static void sync_for_device(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
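/*
* LRU accounting: an object sits on exactly one of the priv->lru lists
* (unbacked, pinned, willneed, dontneed) depending on whether it has
* backing pages, its pin count, and its madvise state. The _locked
* variants expect priv->lru.lock to be held.
*/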
static void update_lru_active(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_obj->pages);
if (msm_obj->pin_count) {
drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
} else {
GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
}
}
static void update_lru_locked(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(&msm_obj->base);
if (!msm_obj->pages) {
GEM_WARN_ON(msm_obj->pin_count);
drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
} else {
update_lru_active(obj);
}
}
static void update_lru(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
mutex_lock(&priv->lru.lock);
update_lru_locked(obj);
mutex_unlock(&priv->lru.lock);
}
static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
int npages = obj->size >> PAGE_SHIFT;
p = drm_gem_get_pages(obj);
if (IS_ERR(p)) {
DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
return p;
}
update_device_mem(dev->dev_private, obj->size);
msm_obj->pages = p;
msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
msm_obj->sgt = NULL;
return ptr;
}
/* For non-cached buffers, ensure the new pages are clean
* because display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
update_lru(obj);
}
return msm_obj->pages;
}
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
/*
* Skip gpuvm in the object free path to avoid a WARN_ON() splat.
* See explanation in msm_gem_assert_locked()
*/
if (kref_read(&obj->refcount))
drm_gpuvm_bo_gem_evict(obj, true);
if (msm_obj->pages) {
if (msm_obj->sgt) {
/* For non-cached buffers, ensure the new
* pages are clean because display controller,
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & MSM_BO_WC)
sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
msm_obj->sgt = NULL;
}
update_device_mem(obj->dev->dev_private, -obj->size);
drm_gem_put_pages(obj, msm_obj->pages, true, false);
msm_obj->pages = NULL;
update_lru(obj);
}
}
struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
if (msm_obj->madv > madv) {
DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
}
return get_pages(obj);
}
/*
* Update the pin count of the object, call under lru.lock
*/
void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
msm_gem_assert_locked(obj);
to_msm_bo(obj)->pin_count++;
drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
}
static void pin_obj_locked(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
mutex_lock(&priv->lru.lock);
msm_gem_pin_obj_locked(obj);
mutex_unlock(&priv->lru.lock);
}
struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct page **p;
msm_gem_assert_locked(obj);
p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (!IS_ERR(p))
pin_obj_locked(obj);
return p;
}
void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
{
msm_gem_assert_locked(obj);
msm_gem_unpin_locked(obj);
}
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
if (msm_obj->flags & MSM_BO_WC)
return pgprot_writecombine(prot);
return prot;
}
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
int err;
vm_fault_t ret;
/*
* vm_ops.open/drm_gem_mmap_obj and close get and put
* a reference on obj. So, we don't need to hold one here.
*/
err = msm_gem_lock_interruptible(obj);
if (err) {
ret = VM_FAULT_NOPAGE;
goto out;
}
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
msm_gem_unlock(obj);
return VM_FAULT_SIGBUS;
}
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
ret = vmf_error(PTR_ERR(pages));
goto out_unlock;
}
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
pfn = page_to_pfn(pages[pgoff]);
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
ret = vmf_insert_pfn(vma, vmf->address, pfn);
out_unlock:
msm_gem_unlock(obj);
out:
return ret;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
msm_gem_assert_locked(obj);
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
return 0;
}
return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
msm_gem_lock(obj);
offset = mmap_offset(obj);
msm_gem_unlock(obj);
return offset;
}
static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
struct drm_gpuvm *vm)
{
struct drm_gpuvm_bo *vm_bo;
msm_gem_assert_locked(obj);
drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
struct drm_gpuva *vma;
drm_gpuvm_bo_for_each_va (vma, vm_bo) {
if (vma->vm == vm) {
/* lookup_vma() should only be used in paths
* with at most one vma per vm
*/
GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
return vma;
}
}
}
return NULL;
}
/*
* If close is true, this also closes the VMA (releasing the allocated
* iova range) in addition to removing the iommu mapping. In the eviction
* case (!close), we keep the iova allocated, but only remove the iommu
* mapping.
*/
static void
put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
bool close, const char *reason)
{
struct drm_gpuvm_bo *vm_bo, *tmp;
msm_gem_assert_locked(obj);
drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
struct drm_gpuva *vma, *vmatmp;
if (vm && vm_bo->vm != vm)
continue;
drm_gpuvm_bo_get(vm_bo);
drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
msm_gem_vma_unmap(vma, reason);
if (close)
msm_gem_vma_close(vma);
}
drm_gpuvm_bo_put(vm_bo);
}
}
static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
struct drm_gpuvm *vm, u64 range_start,
u64 range_end)
{
struct drm_gpuva *vma;
msm_gem_assert_locked(obj);
vma = lookup_vma(obj, vm);
if (!vma) {
vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
} else {
GEM_WARN_ON(vma->va.addr < range_start);
GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
}
return vma;
}
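/*
* Derive the IOMMU mapping flags from the BO flags: always readable,
* writable unless GPU-readonly, and optionally privileged and
* IO-coherent.
*/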
int msm_gem_prot(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
if (msm_obj->flags & MSM_BO_MAP_PRIV)
prot |= IOMMU_PRIV;
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
return prot;
}
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
int prot = msm_gem_prot(obj);
msm_gem_assert_locked(obj);
pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
return msm_gem_vma_map(vma, prot, msm_obj->sgt);
}
void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
mutex_lock(&priv->lru.lock);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
update_lru_locked(obj);
mutex_unlock(&priv->lru.lock);
}
/* Special unpin path for use in fence-signaling path, avoiding the need
* to hold the obj lock by only depending on things that are protected by
* the LRU lock. In particular we know that we already have backing
* and that the object's dma_resv has the fence for the current
* submit/job which will prevent us racing against page eviction.
*/
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
update_lru_active(obj);
}
struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
struct drm_gpuvm *vm)
{
return get_vma_locked(obj, vm, 0, U64_MAX);
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct drm_gpuvm *vm, uint64_t *iova,
u64 range_start, u64 range_end)
{
struct drm_gpuva *vma;
int ret;
msm_gem_assert_locked(obj);
if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
return -EINVAL;
vma = get_vma_locked(obj, vm, range_start, range_end);
if (IS_ERR(vma))
return PTR_ERR(vma);
ret = msm_gem_pin_vma_locked(obj, vma);
if (!ret) {
*iova = vma->va.addr;
pin_obj_locked(obj);
}
return ret;
}
/*
* Get the iova and pin it. Should have a matching put.
* Limits the iova to the specified range (in pages).
*/
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
struct drm_gpuvm *vm, uint64_t *iova,
u64 range_start, u64 range_end)
{
struct drm_exec exec;
int ret;
msm_gem_lock_vm_and_obj(&exec, obj, vm);
ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
drm_exec_fini(&exec); /* drop locks */
return ret;
}
/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
uint64_t *iova)
{
return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
}
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
*/
int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
uint64_t *iova)
{
struct drm_gpuva *vma;
struct drm_exec exec;
int ret = 0;
msm_gem_lock_vm_and_obj(&exec, obj, vm);
vma = get_vma_locked(obj, vm, 0, U64_MAX);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
*iova = vma->va.addr;
}
drm_exec_fini(&exec); /* drop locks */
return ret;
}
static int clear_iova(struct drm_gem_object *obj,
struct drm_gpuvm *vm)
{
struct drm_gpuva *vma = lookup_vma(obj, vm);
if (!vma)
return 0;
msm_gem_vma_unmap(vma, NULL);
msm_gem_vma_close(vma);
return 0;
}
/*
* Get the requested iova but don't pin it. Fails if the requested iova is
* not available. Doesn't need a put because iovas are currently valid for
* the life of the object.
*
* Setting an iova of zero will clear the vma.
*/
int msm_gem_set_iova(struct drm_gem_object *obj,
struct drm_gpuvm *vm, uint64_t iova)
{
struct drm_exec exec;
int ret = 0;
msm_gem_lock_vm_and_obj(&exec, obj, vm);
if (!iova) {
ret = clear_iova(obj, vm);
} else {
struct drm_gpuva *vma;
vma = get_vma_locked(obj, vm, iova, iova + obj->size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else if (GEM_WARN_ON(vma->va.addr != iova)) {
clear_iova(obj, vm);
ret = -EBUSY;
}
}
drm_exec_fini(&exec); /* drop locks */
return ret;
}
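/* Is this the display (kms) VM? Its iova lifetimes are managed via
* msm_gem_vma_get()/msm_gem_vma_put() rather than per-handle teardown.
*/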
static bool is_kms_vm(struct drm_gpuvm *vm)
{
struct msm_drm_private *priv = vm->drm->dev_private;
return priv->kms && (priv->kms->vm == vm);
}
/*
* Unpin an iova by updating the reference counts. The memory isn't actually
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
* to get rid of it
*/
void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
struct drm_gpuva *vma;
struct drm_exec exec;
msm_gem_lock_vm_and_obj(&exec, obj, vm);
vma = lookup_vma(obj, vm);
if (vma) {
msm_gem_unpin_locked(obj);
}
if (!is_kms_vm(vm))
put_iova_spaces(obj, vm, true, "close");
drm_exec_fini(&exec); /* drop locks */
}
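/*
* Allocate a scanout-capable dumb buffer; the pitch comes from
* align_pitch() and the total size is page-aligned.
*/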
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
args->pitch = align_pitch(args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
return msm_gem_new_handle(dev, file, args->size,
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct drm_gem_object *obj;
int ret = 0;
/* GEM does all our handle to object mapping */
obj = drm_gem_object_lookup(file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto fail;
}
*offset = msm_gem_mmap_offset(obj);
drm_gem_object_put(obj);
fail:
return ret;
}
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
int ret = 0;
msm_gem_assert_locked(obj);
if (drm_gem_is_imported(obj))
return ERR_PTR(-ENODEV);
pages = msm_gem_get_pages_locked(obj, madv);
if (IS_ERR(pages))
return ERR_CAST(pages);
pin_obj_locked(obj);
/* increment vmap_count *before* vmap() call, so shrinker can
* check vmap_count (is_vunmapable()) outside of msm_obj lock.
* This guarantees that we won't try to msm_gem_vunmap() this
* same object from within the vmap() call (while we already
* hold msm_obj lock)
*/
msm_obj->vmap_count++;
if (!msm_obj->vaddr) {
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
}
}
return msm_obj->vaddr;
fail:
msm_obj->vmap_count--;
msm_gem_unpin_locked(obj);
return ERR_PTR(ret);
}
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
return get_vaddr(obj, MSM_MADV_WILLNEED);
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
void *ret;
msm_gem_lock(obj);
ret = msm_gem_get_vaddr_locked(obj);
msm_gem_unlock(obj);
return ret;
}
/*
* Don't use this! It is for the very special case of dumping
* submits from GPU hangs or faults, where the bo may already
* be MSM_MADV_DONTNEED, but we know the buffer is still on the
* active list.
*/
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
return get_vaddr(obj, __MSM_MADV_PURGED);
}
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
msm_gem_unpin_locked(obj);
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
msm_gem_lock(obj);
msm_gem_put_vaddr_locked(obj);
msm_gem_unlock(obj);
}
/* Update madvise status, returns true if not purged, else false. */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_lock(obj);
mutex_lock(&priv->lru.lock);
if (msm_obj->madv != __MSM_MADV_PURGED)
msm_obj->madv = madv;
madv = msm_obj->madv;
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
update_lru_locked(obj);
mutex_unlock(&priv->lru.lock);
msm_gem_unlock(obj);
return (madv != __MSM_MADV_PURGED);
}
void msm_gem_purge(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
put_iova_spaces(obj, NULL, false, "purge");
msm_gem_vunmap(obj);
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
mutex_lock(&priv->lru.lock);
/* A one-way transition: */
msm_obj->madv = __MSM_MADV_PURGED;
mutex_unlock(&priv->lru.lock);
drm_gem_free_mmap_offset(obj);
/* Our goal here is to return as much of the memory as possible
* back to the system, as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
0, (loff_t)-1);
}
/*
* Unpin the backing pages and make them available to be swapped out.
*/
void msm_gem_evict(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
GEM_WARN_ON(is_unevictable(msm_obj));
/* Get rid of any iommu mapping(s): */
put_iova_spaces(obj, NULL, false, "evict");
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return;
vunmap(msm_obj->vaddr);
msm_obj->vaddr = NULL;
}
bool msm_gem_active(struct drm_gem_object *obj)
{
msm_gem_assert_locked(obj);
if (to_msm_bo(obj)->pin_count)
return true;
return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
bool write = !!(op & MSM_PREP_WRITE);
unsigned long remain =
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
if (op & MSM_PREP_BOOST) {
dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
ktime_get());
}
ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
else if (ret < 0)
return ret;
/* TODO cache maintenance */
return 0;
}
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
/* TODO cache maintenance */
return 0;
}
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
struct msm_gem_stats *stats)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
if (!msm_gem_trylock(obj))
return;
stats->all.count++;
stats->all.size += obj->size;
if (msm_gem_active(obj)) {
stats->active.count++;
stats->active.size += obj->size;
}
if (msm_obj->pages) {
stats->resident.count++;
stats->resident.size += obj->size;
}
switch (msm_obj->madv) {
case __MSM_MADV_PURGED:
stats->purged.count++;
stats->purged.size += obj->size;
madv = " purged";
break;
case MSM_MADV_DONTNEED:
stats->purgeable.count++;
stats->purgeable.size += obj->size;
madv = " purgeable";
break;
case MSM_MADV_WILLNEED:
default:
madv = "";
break;
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
if (!list_empty(&obj->gpuva.list)) {
struct drm_gpuvm_bo *vm_bo;
seq_puts(m, " vmas:");
drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
struct drm_gpuva *vma;
drm_gpuvm_bo_for_each_va (vma, vm_bo) {
const char *name, *comm;
struct msm_gem_vm *vm = to_msm_vm(vma->vm);
struct task_struct *task =
get_pid_task(vm->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
put_task_struct(task);
} else {
comm = NULL;
}
name = vm->base.name;
seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
name, comm ? ":" : "", comm ? comm : "",
vma->vm, vma->va.addr,
to_msm_vma(vma)->mapped ? "" : "un");
kfree(comm);
}
}
seq_puts(m, "\n");
}
dma_resv_describe(robj, m);
msm_gem_unlock(obj);
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
struct msm_gem_stats stats = {};
struct msm_gem_object *msm_obj;
seq_puts(m, " flags id ref offset kaddr size madv name\n");
list_for_each_entry(msm_obj, list, node) {
struct drm_gem_object *obj = &msm_obj->base;
seq_puts(m, " ");
msm_gem_describe(obj, m, &stats);
}
seq_printf(m, "Total: %4d objects, %9zu bytes\n",
stats.all.count, stats.all.size);
seq_printf(m, "Active: %4d objects, %9zu bytes\n",
stats.active.count, stats.active.size);
seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
stats.resident.count, stats.resident.size);
seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
stats.purgeable.count, stats.purgeable.size);
seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
stats.purged.count, stats.purged.size);
}
#endif
/* don't call directly! Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_exec exec;
mutex_lock(&priv->obj_lock);
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
/*
* We need to lock any VMs the object is still attached to, but not
* the object itself (see explanation in msm_gem_assert_locked()),
* so just open-code this special case.
*
* Note that we skip the dance if we aren't attached to any VM. This
* is load bearing. The driver needs to support two usage models:
*
* 1. Legacy kernel managed VM: Userspace expects the VMAs to be
* implicitly torn down when the object is freed, the VMAs do
* not hold a hard reference to the BO.
*
* 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
* BO. This can be dropped when the VM is closed and its associated
* VMAs are torn down. (See msm_gem_vm_close()).
*
* In the latter case the last reference to a BO can be dropped while
* we already have the VM locked. It would have already been removed
* from the gpuva list, but lockdep doesn't know that, or understand
* the differences between the two usage models.
*/
if (!list_empty(&obj->gpuva.list)) {
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked (&exec) {
struct drm_gpuvm_bo *vm_bo;
drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
drm_exec_lock_obj(&exec,
drm_gpuvm_resv_obj(vm_bo->vm));
drm_exec_retry_on_contention(&exec);
}
}
put_iova_spaces(obj, NULL, true, "free");
drm_exec_fini(&exec); /* drop locks */
}
if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
*/
kvfree(msm_obj->pages);
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
}
if (msm_obj->flags & MSM_BO_NO_SHARE) {
struct drm_gem_object *r_obj =
container_of(obj->resv, struct drm_gem_object, _resv);
/* Drop reference we hold to shared resv obj: */
drm_gem_object_put(r_obj);
}
drm_gem_object_release(obj);
kfree(msm_obj->metadata);
kfree(msm_obj);
}
static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle,
char *name)
{
struct drm_gem_object *obj;
int ret;
obj = msm_gem_new(dev, size, flags);
if (IS_ERR(obj))
return PTR_ERR(obj);
if (name)
msm_gem_object_set_name(obj, "%s", name);
if (flags & MSM_BO_NO_SHARE) {
struct msm_context *ctx = file->driver_priv;
struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
drm_gem_object_get(r_obj);
obj->resv = r_obj->resv;
}
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(obj);
return ret;
}
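/* Report residency and purgeability for drm fdinfo memory stats */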
static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
enum drm_gem_object_status status = 0;
if (msm_obj->pages)
status |= DRM_GEM_OBJECT_RESIDENT;
if (msm_obj->madv == MSM_MADV_DONTNEED)
status |= DRM_GEM_OBJECT_PURGEABLE;
return status;
}
static const struct vm_operations_struct vm_ops = {
.fault = msm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.open = msm_gem_open,
.close = msm_gem_close,
.export = msm_gem_prime_export,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
.vmap = msm_gem_prime_vmap,
.vunmap = msm_gem_prime_vunmap,
.mmap = msm_gem_object_mmap,
.status = msm_gem_status,
.vm_ops = &vm_ops,
};
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_CACHED:
case MSM_BO_WC:
break;
case MSM_BO_CACHED_COHERENT:
if (priv->has_cached_coherent)
break;
fallthrough;
default:
DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
INIT_LIST_HEAD(&msm_obj->node);
*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;
return 0;
}
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
int ret;
size = PAGE_ALIGN(size);
/* Disallow zero-sized objects as they make the underlying
* infrastructure grumpy
*/
if (size == 0)
return ERR_PTR(-EINVAL);
ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
return ERR_PTR(ret);
msm_obj = to_msm_bo(obj);
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
/*
* Our buffers are kept pinned, so allocating them from the
* MOVABLE zone is a really bad idea, and conflicts with CMA.
* See comments above new_inode() why this is required _and_
* expected if you're going to pin these pages.
*/
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto fail;
return obj;
fail:
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
uint32_t size;
int ret, npages;
size = PAGE_ALIGN(dmabuf->size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
if (ret)
return ERR_PTR(ret);
drm_gem_private_object_init(dev, obj, size);
npages = size / PAGE_SIZE;
msm_obj = to_msm_bo(obj);
msm_gem_lock(obj);
msm_obj->sgt = sgt;
msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!msm_obj->pages) {
msm_gem_unlock(obj);
ret = -ENOMEM;
goto fail;
}
ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
if (ret) {
msm_gem_unlock(obj);
goto fail;
}
msm_gem_unlock(obj);
drm_gem_lru_move_tail(&priv->lru.pinned, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto fail;
return obj;
fail:
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
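/*
* Allocate a kernel-internal BO, pin it into @vm, and map it into the
* kernel. A usage sketch (error handling elided; "gpu->vm" stands in
* for whichever VM the caller owns):
*
*	uint64_t iova;
*	struct drm_gem_object *bo;
*	void *vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC,
*					 gpu->vm, &bo, &iova);
*	...
*	msm_gem_kernel_put(bo, gpu->vm);
*/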
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
struct drm_gpuvm *vm, struct drm_gem_object **bo,
uint64_t *iova)
{
void *vaddr;
struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
int ret;
if (IS_ERR(obj))
return ERR_CAST(obj);
if (iova) {
ret = msm_gem_get_and_pin_iova(obj, vm, iova);
if (ret)
goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
msm_gem_unpin_iova(obj, vm);
ret = PTR_ERR(vaddr);
goto err;
}
if (bo)
*bo = obj;
return vaddr;
err:
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
{
if (IS_ERR_OR_NULL(bo))
return;
msm_gem_put_vaddr(bo);
msm_gem_unpin_iova(bo, vm);
drm_gem_object_put(bo);
}
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
struct msm_gem_object *msm_obj = to_msm_bo(bo);
va_list ap;
if (!fmt)
return;
va_start(ap, fmt);
vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
va_end(ap);
}