Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
iommufd: Selftest coverage for IOMMU_IOAS_MAP_FILE
Add test cases to exercise IOMMU_IOAS_MAP_FILE.

Link: https://patch.msgid.link/r/1729861919-234514-10-git-send-email-steven.sistare@oracle.com
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 976a40c075
commit 0bcceb1f51

3 changed files with 205 additions and 15 deletions
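For orientation, the userspace flow these tests exercise looks roughly like the sketch below. It is a minimal illustration, not part of the patch: iommufd_fd, ioas_id and memfd are assumed to come from open("/dev/iommu"), IOMMU_IOAS_ALLOC and memfd_create(), and the structure layout and flags mirror the _test_ioctl_ioas_map_file() helper added to iommufd_utils.h further down.

/*
 * Sketch only (not from the patch): map a memfd-backed region into an IOAS
 * with IOMMU_IOAS_MAP_FILE. The caller owns iommufd_fd, ioas_id and memfd.
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>	/* iommufd uAPI header */

static int sketch_map_memfd(int iommufd_fd, __u32 ioas_id, int memfd,
			    size_t length, __u64 *iova)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE,
		.ioas_id = ioas_id,
		.fd = memfd,
		.start = 0,		/* byte offset into the file */
		.length = length,
	};
	int rc;

	/* Without IOMMU_IOAS_MAP_FIXED_IOVA the kernel chooses the IOVA. */
	rc = ioctl(iommufd_fd, IOMMU_IOAS_MAP_FILE, &cmd);
	if (!rc)
		*iova = cmd.iova;
	return rc;
}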
tools/testing/selftests/iommu/iommufd.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
+#include <asm/unistd.h>
 #include <stdlib.h>
 #include <sys/mman.h>
 #include <sys/eventfd.h>
@@ -49,6 +50,9 @@ static __attribute__((constructor)) void setup_sizes(void)
 	vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
 		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
 	assert(vrc == buffer);
+
+	mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+				&mfd);
 }
 
 FIXTURE(iommufd)
@@ -128,6 +132,7 @@ TEST_F(iommufd, cmd_length)
 	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
 	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
 	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
+	TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);
 #undef TEST_LENGTH
 }
 
@@ -1372,6 +1377,7 @@ FIXTURE_VARIANT(iommufd_mock_domain)
 {
 	unsigned int mock_domains;
 	bool hugepages;
+	bool file;
 };
 
 FIXTURE_SETUP(iommufd_mock_domain)
@@ -1410,26 +1416,45 @@ FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
 {
 	.mock_domains = 1,
 	.hugepages = false,
+	.file = false,
 };
 
 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
 {
 	.mock_domains = 2,
 	.hugepages = false,
+	.file = false,
 };
 
 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
 {
 	.mock_domains = 1,
 	.hugepages = true,
+	.file = false,
 };
 
 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
 {
 	.mock_domains = 2,
 	.hugepages = true,
+	.file = false,
 };
+
+FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file)
+{
+	.mock_domains = 1,
+	.hugepages = false,
+	.file = true,
+};
+
+FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file_hugepage)
+{
+	.mock_domains = 1,
+	.hugepages = true,
+	.file = true,
+};
+
 
 /* Have the kernel check that the user pages made it to the iommu_domain */
 #define check_mock_iova(_ptr, _iova, _length)                               \
 	({                                                                   \
@@ -1455,7 +1480,10 @@ FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
 	} \
 })
 
-TEST_F(iommufd_mock_domain, basic)
+static void
+test_basic_mmap(struct __test_metadata *_metadata,
+		struct _test_data_iommufd_mock_domain *self,
+		const struct _fixture_variant_iommufd_mock_domain *variant)
 {
 	size_t buf_size = self->mmap_buf_size;
 	uint8_t *buf;
@@ -1478,6 +1506,40 @@ TEST_F(iommufd_mock_domain, basic)
 	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
 }
 
+static void
+test_basic_file(struct __test_metadata *_metadata,
+		struct _test_data_iommufd_mock_domain *self,
+		const struct _fixture_variant_iommufd_mock_domain *variant)
+{
+	size_t buf_size = self->mmap_buf_size;
+	uint8_t *buf;
+	__u64 iova;
+	int mfd_tmp;
+	int prot = PROT_READ | PROT_WRITE;
+
+	/* Simple one page map */
+	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
+	check_mock_iova(mfd_buffer, iova, PAGE_SIZE);
+
+	buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd_tmp);
+	ASSERT_NE(MAP_FAILED, buf);
+
+	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size + 1, &iova);
+
+	ASSERT_EQ(0, ftruncate(mfd_tmp, 0));
+	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size, &iova);
+
+	close(mfd_tmp);
+}
+
+TEST_F(iommufd_mock_domain, basic)
+{
+	if (variant->file)
+		test_basic_file(_metadata, self, variant);
+	else
+		test_basic_mmap(_metadata, self, variant);
+}
+
 TEST_F(iommufd_mock_domain, ro_unshare)
 {
 	uint8_t *buf;
@@ -1513,9 +1575,13 @@ TEST_F(iommufd_mock_domain, all_aligns)
 	unsigned int start;
 	unsigned int end;
 	uint8_t *buf;
+	int prot = PROT_READ | PROT_WRITE;
+	int mfd;
 
-	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
-		   0);
+	if (variant->file)
+		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
+	else
+		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
 	ASSERT_NE(MAP_FAILED, buf);
 	check_refs(buf, buf_size, 0);
 
@@ -1532,7 +1598,12 @@ TEST_F(iommufd_mock_domain, all_aligns)
 			size_t length = end - start;
 			__u64 iova;
 
-			test_ioctl_ioas_map(buf + start, length, &iova);
+			if (variant->file) {
+				test_ioctl_ioas_map_file(mfd, start, length,
+							 &iova);
+			} else {
+				test_ioctl_ioas_map(buf + start, length, &iova);
+			}
 			check_mock_iova(buf + start, iova, length);
 			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
 				   end / PAGE_SIZE * PAGE_SIZE -
@@ -1544,6 +1615,8 @@ TEST_F(iommufd_mock_domain, all_aligns)
 		}
 	check_refs(buf, buf_size, 0);
 	ASSERT_EQ(0, munmap(buf, buf_size));
+	if (variant->file)
+		close(mfd);
 }
 
 TEST_F(iommufd_mock_domain, all_aligns_copy)
@@ -1554,9 +1627,13 @@ TEST_F(iommufd_mock_domain, all_aligns_copy)
 	unsigned int start;
 	unsigned int end;
 	uint8_t *buf;
+	int prot = PROT_READ | PROT_WRITE;
+	int mfd;
 
-	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
-		   0);
+	if (variant->file)
+		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
+	else
+		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
 	ASSERT_NE(MAP_FAILED, buf);
 	check_refs(buf, buf_size, 0);
 
@@ -1575,7 +1652,12 @@ TEST_F(iommufd_mock_domain, all_aligns_copy)
 			uint32_t mock_stdev_id;
 			__u64 iova;
 
-			test_ioctl_ioas_map(buf + start, length, &iova);
+			if (variant->file) {
+				test_ioctl_ioas_map_file(mfd, start, length,
+							 &iova);
+			} else {
+				test_ioctl_ioas_map(buf + start, length, &iova);
+			}
 
 			/* Add and destroy a domain while the area exists */
 			old_id = self->hwpt_ids[1];
@@ -1596,15 +1678,18 @@ TEST_F(iommufd_mock_domain, all_aligns_copy)
 		}
 	check_refs(buf, buf_size, 0);
 	ASSERT_EQ(0, munmap(buf, buf_size));
+	if (variant->file)
+		close(mfd);
 }
 
 TEST_F(iommufd_mock_domain, user_copy)
 {
+	void *buf = variant->file ? mfd_buffer : buffer;
 	struct iommu_test_cmd access_cmd = {
 		.size = sizeof(access_cmd),
 		.op = IOMMU_TEST_OP_ACCESS_PAGES,
 		.access_pages = { .length = BUFFER_SIZE,
-				  .uptr = (uintptr_t)buffer },
+				  .uptr = (uintptr_t)buf },
 	};
 	struct iommu_ioas_copy copy_cmd = {
 		.size = sizeof(copy_cmd),
@@ -1623,9 +1708,13 @@ TEST_F(iommufd_mock_domain, user_copy)
 
 	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
 	test_ioctl_ioas_alloc(&ioas_id);
-	test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
-			       &copy_cmd.src_iova);
-
+	if (variant->file) {
+		test_ioctl_ioas_map_id_file(ioas_id, mfd, 0, BUFFER_SIZE,
+					    &copy_cmd.src_iova);
+	} else {
+		test_ioctl_ioas_map_id(ioas_id, buf, BUFFER_SIZE,
+				       &copy_cmd.src_iova);
+	}
 	test_cmd_create_access(ioas_id, &access_cmd.id,
 			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
 
@@ -1635,12 +1724,17 @@ TEST_F(iommufd_mock_domain, user_copy)
 			&access_cmd));
 	copy_cmd.src_ioas_id = ioas_id;
 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
-	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
+	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
 
 	/* Now replace the ioas with a new one */
 	test_ioctl_ioas_alloc(&new_ioas_id);
-	test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
-			       &copy_cmd.src_iova);
+	if (variant->file) {
+		test_ioctl_ioas_map_id_file(new_ioas_id, mfd, 0, BUFFER_SIZE,
+					    &copy_cmd.src_iova);
+	} else {
+		test_ioctl_ioas_map_id(new_ioas_id, buf, BUFFER_SIZE,
+				       &copy_cmd.src_iova);
+	}
 	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
 
 	/* Destroy the old ioas and cleanup copied mapping */
@@ -1654,7 +1748,7 @@ TEST_F(iommufd_mock_domain, user_copy)
 			&access_cmd));
 	copy_cmd.src_ioas_id = new_ioas_id;
 	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
-	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
+	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);
 
 	test_cmd_destroy_access_pages(
 		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
tools/testing/selftests/iommu/iommufd_fail_nth.c

@@ -47,6 +47,9 @@ static __attribute__((constructor)) void setup_buffer(void)
 
 	buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
 		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+
+	mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+				&mfd);
 }
 
 /*
@@ -331,6 +334,42 @@ TEST_FAIL_NTH(basic_fail_nth, map_domain)
 	return 0;
 }
 
+/* iopt_area_fill_domains() and iopt_area_fill_domain() */
+TEST_FAIL_NTH(basic_fail_nth, map_file_domain)
+{
+	uint32_t ioas_id;
+	__u32 stdev_id;
+	__u32 hwpt_id;
+	__u64 iova;
+
+	self->fd = open("/dev/iommu", O_RDWR);
+	if (self->fd == -1)
+		return -1;
+
+	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
+		return -1;
+
+	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
+		return -1;
+
+	fail_nth_enable();
+
+	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
+		return -1;
+
+	if (_test_ioctl_ioas_map_file(self->fd, ioas_id, mfd, 0, 262144, &iova,
+				      IOMMU_IOAS_MAP_WRITEABLE |
+				      IOMMU_IOAS_MAP_READABLE))
+		return -1;
+
+	if (_test_ioctl_destroy(self->fd, stdev_id))
+		return -1;
+
+	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
+		return -1;
+	return 0;
+}
+
 TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
 {
 	uint32_t ioas_id;
tools/testing/selftests/iommu/iommufd_utils.h

@@ -40,12 +40,28 @@ static inline bool test_bit(unsigned int nr, unsigned long *addr)
 static void *buffer;
 static unsigned long BUFFER_SIZE;
 
+static void *mfd_buffer;
+static int mfd;
+
 static unsigned long PAGE_SIZE;
 
 #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
 #define offsetofend(TYPE, MEMBER) \
 	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
 
+static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
+{
+	int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;
+	int mfd = memfd_create("buffer", mfd_flags);
+
+	if (mfd <= 0)
+		return MAP_FAILED;
+	if (ftruncate(mfd, length))
+		return MAP_FAILED;
+	*mfd_p = mfd;
+	return mmap(0, length, prot, flags, mfd, 0);
+}
+
 /*
  * Have the kernel check the refcount on pages. I don't know why a freshly
  * mmap'd anon non-compound page starts out with a ref of 3
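The memfd_mmap() helper added above is what gives the file-backed variants their test buffers. A small usage sketch, not from the patch (it assumes iommufd_utils.h is included; BUF_LEN is a placeholder):

/* Sketch only: create a file-backed, shared mapping the way the tests do,
 * then release it. The memfd remains valid as the fd argument for
 * IOMMU_IOAS_MAP_FILE until it is closed.
 */
#include <err.h>
#include <sys/mman.h>
#include <unistd.h>

#define BUF_LEN (256 * 1024)	/* placeholder size */

static void memfd_mmap_example(void)
{
	int memfd;
	void *buf = memfd_mmap(BUF_LEN, PROT_READ | PROT_WRITE, MAP_SHARED,
			       &memfd);

	if (buf == MAP_FAILED)
		err(1, "memfd_mmap");

	/* ... pass memfd to IOMMU_IOAS_MAP_FILE, touch buf, run checks ... */

	munmap(buf, BUF_LEN);
	close(memfd);
}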
@@ -589,6 +605,47 @@ static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
 	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
 						    iova, length, NULL))
 
+static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
+				     size_t start, size_t length, __u64 *iova,
+				     unsigned int flags)
+{
+	struct iommu_ioas_map_file cmd = {
+		.size = sizeof(cmd),
+		.flags = flags,
+		.ioas_id = ioas_id,
+		.fd = mfd,
+		.start = start,
+		.length = length,
+	};
+	int ret;
+
+	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
+		cmd.iova = *iova;
+
+	ret = ioctl(fd, IOMMU_IOAS_MAP_FILE, &cmd);
+	*iova = cmd.iova;
+	return ret;
+}
+
+#define test_ioctl_ioas_map_file(mfd, start, length, iova_p)                  \
+	ASSERT_EQ(0,                                                           \
+		  _test_ioctl_ioas_map_file(                                   \
+			  self->fd, self->ioas_id, mfd, start, length, iova_p, \
+			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))
+
+#define test_err_ioctl_ioas_map_file(_errno, mfd, start, length, iova_p)      \
+	EXPECT_ERRNO(                                                          \
+		_errno,                                                        \
+		_test_ioctl_ioas_map_file(                                     \
+			self->fd, self->ioas_id, mfd, start, length, iova_p,   \
+			IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))
+
+#define test_ioctl_ioas_map_id_file(ioas_id, mfd, start, length, iova_p)      \
+	ASSERT_EQ(0,                                                           \
+		  _test_ioctl_ioas_map_file(                                   \
+			  self->fd, ioas_id, mfd, start, length, iova_p,       \
+			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))
+
 static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
 {
 	struct iommu_test_cmd memlimit_cmd = {
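The wrapper above also threads IOMMU_IOAS_MAP_FIXED_IOVA through to the kernel, which none of the new macros set. A hypothetical extra check, sketched here and not part of the patch, could pin a file mapping at a caller-chosen IOVA; it assumes the existing iommufd_mock_domain fixture (self->fd, self->ioas_id) and the mfd/mfd_buffer globals set up in setup_sizes():

/* Hypothetical, not in this patch: map one page of the memfd at a fixed IOVA. */
TEST_F(iommufd_mock_domain, map_file_fixed_iova_sketch)
{
	__u64 iova = MOCK_APERTURE_START;	/* example IOVA in the aperture */

	ASSERT_EQ(0, _test_ioctl_ioas_map_file(self->fd, self->ioas_id, mfd, 0,
					       PAGE_SIZE, &iova,
					       IOMMU_IOAS_MAP_FIXED_IOVA |
					       IOMMU_IOAS_MAP_WRITEABLE |
					       IOMMU_IOAS_MAP_READABLE));
	check_mock_iova(mfd_buffer, iova, PAGE_SIZE);
	test_ioctl_ioas_unmap(iova, PAGE_SIZE);
}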