2016-08-19 16:54:28 +01:00
|
|
|
/*
|
|
|
|
* Copyright © 2014 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/io-mapping.h>
|
|
|
|
|
|
|
|
|
|
|
|
#include "i915_drv.h"
|
2021-12-08 09:42:15 +05:30
|
|
|
#include "i915_mm.h"
|
2016-08-19 16:54:28 +01:00
|
|
|
|
2021-05-19 05:55:57 -10:00
|
|
|
/*
 * Shared state for the apply_to_page_range() callbacks below. Tracks the
 * next pfn to install and, for scatterlist-based remaps, the iterator
 * position within the sg table.
 */
struct remap_pfn {
	struct mm_struct *mm;	/* target address space for the PTEs */
	unsigned long pfn;	/* next pfn to insert; also counts insertions for unwind */
	pgprot_t prot;		/* protection bits applied to each new PTE */

	struct sgt_iter sgt;	/* current position in the scatterlist (sg path only) */
	resource_size_t iobase;	/* dma address offset, or -1 to use page pfns */
};
|
2016-08-19 16:54:28 +01:00
|
|
|
|
2020-01-03 20:41:35 +00:00
|
|
|
#define use_dma(io) ((io) != -1)
|
|
|
|
|
2021-05-19 05:55:57 -10:00
|
|
|
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
|
|
|
|
{
|
|
|
|
if (use_dma(r->iobase))
|
|
|
|
return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
|
|
|
|
else
|
|
|
|
return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * apply_to_page_range() callback: install one special PTE per call,
 * walking the scatterlist held in the remap_pfn cursor. Returns 0 on
 * success or -EINVAL if the scatterlist is exhausted early.
 */
static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* Ran off the end of the scatterlist before covering the vma range */
	if (GEM_WARN_ON(!r->sgt.sgp))
		return -EINVAL;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	/* Advance within the current sg entry; step to the next when consumed */
	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}
|
|
|
|
|
2021-12-08 09:42:15 +05:30
|
|
|
/* vma flags callers must have set before handing the vma to these remap helpers */
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_X86)
|
|
|
|
/*
 * apply_to_page_range() callback: install one special PTE for the next
 * pfn in a linear physical range, then advance the cursor.
 */
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *v = data;
	pte_t entry = pte_mkspecial(pfn_pte(v->pfn, v->prot));

	/* Special PTE are not associated with any struct page */
	set_pte_at(v->mm, addr, pte, entry);
	v->pfn++;

	return 0;
}
|
|
|
|
|
2021-05-27 19:51:45 +01:00
|
|
|
/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 *
 * Return: 0 on success, negative error code on failure.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	/* Take cache attributes from the io_mapping, everything else from the vma */
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		/* r.pfn - pfn PTEs were installed before the failure; zap them */
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}
|
2021-12-08 09:42:15 +05:30
|
|
|
#endif
|
2021-05-19 05:55:57 -10:00
|
|
|
|
2019-12-31 20:03:56 +00:00
|
|
|
/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 * @offset: offset from the start of the page
 * @iobase: Use stored dma address offset by this address or pfn if -1
 *
 * Note: this is only safe if the mm semaphore is held when called.
 *
 * Return: 0 on success, negative error code on failure.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, unsigned long offset,
		resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/*
	 * Skip whole sg entries until @offset (in pages) falls inside one;
	 * the remainder seeds sgt.curr so the first PTE maps the right page.
	 * Using sgt.max here keeps the skip consistent with the dma/pfn mode
	 * chosen by __sgt_iter() above.
	 */
	while (offset >= r.sgt.max >> PAGE_SHIFT) {
		offset -= r.sgt.max >> PAGE_SHIFT;
		r.sgt = __sgt_iter(__sg_next(r.sgt.sgp), use_dma(iobase));
		if (!r.sgt.sgp)
			return -EINVAL;
	}
	r.sgt.curr = offset << PAGE_SHIFT;

	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		/* remap_sg counted its insertions in r.pfn; unwind exactly those */
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
|