/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#ifndef __IOMMUFD_PRIVATE_H
#define __IOMMUFD_PRIVATE_H

#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/iova_bitmap.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"

struct iommu_domain;
struct iommu_group;
struct iommu_option;
struct iommufd_device;

struct iommufd_sw_msi_map {
	struct list_head sw_msi_item;
	phys_addr_t sw_msi_start;
	phys_addr_t msi_addr;
	unsigned int pgoff;
	unsigned int id;
};

/* Bitmap of struct iommufd_sw_msi_map::id */
struct iommufd_sw_msi_maps {
	DECLARE_BITMAP(bitmap, 64);
};

#ifdef CONFIG_IRQ_MSI_IOMMU
int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
			   struct iommufd_hwpt_paging *hwpt_paging,
			   struct iommufd_sw_msi_map *msi_map);
#endif

struct iommufd_ctx {
	struct file *file;
	struct xarray objects;
	struct xarray groups;
	wait_queue_head_t destroy_wait;
	struct rw_semaphore ioas_creation_lock;
	struct maple_tree mt_mmap;

	struct mutex sw_msi_lock;
	struct list_head sw_msi_list;
	unsigned int sw_msi_id;

	u8 account_mode;
	/* Compatibility with VFIO no iommu */
	u8 no_iommu_mode;
	struct iommufd_ioas *vfio_ioas;
};

/* Entry for iommufd_ctx::mt_mmap */
struct iommufd_mmap {
	struct iommufd_object *owner;

	/* Page-shifted start position in mt_mmap to validate vma->vm_pgoff */
	unsigned long vm_pgoff;

	/* Physical range for io_remap_pfn_range() */
	phys_addr_t mmio_addr;
	size_t length;
};

/*
 * The IOVA to PFN map. The map automatically copies the PFNs into multiple
 * domains and permits sharing of PFNs between io_pagetable instances. This
 * supports both a design where IOAS's are 1:1 with a domain (eg because the
 * domain is HW customized), or where the IOAS is 1:N with multiple generic
 * domains. The io_pagetable holds an interval tree of iopt_areas which point
 * to shared iopt_pages which hold the pfns mapped to the page table.
 *
 * The locking order is domains_rwsem -> iova_rwsem -> pages::mutex
 */
struct io_pagetable {
	struct rw_semaphore domains_rwsem;
	struct xarray domains;
	struct xarray access_list;
	unsigned int next_domain_id;

	struct rw_semaphore iova_rwsem;
	struct rb_root_cached area_itree;
	/* IOVA that cannot become reserved, struct iopt_allowed */
	struct rb_root_cached allowed_itree;
	/* IOVA that cannot be allocated, struct iopt_reserved */
	struct rb_root_cached reserved_itree;
	u8 disable_large_pages;
	unsigned long iova_alignment;
};
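
/*
 * Illustrative sketch only (not a helper defined in this file): the comment
 * above fixes the lock order as domains_rwsem -> iova_rwsem -> pages::mutex,
 * so a hypothetical walker over the areas and attached domains would nest
 * the locks like this:
 *
 *	down_read(&iopt->domains_rwsem);
 *	down_read(&iopt->iova_rwsem);
 *	// ... walk area_itree and the domains xarray ...
 *	up_read(&iopt->iova_rwsem);
 *	up_read(&iopt->domains_rwsem);
 */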

void iopt_init_table(struct io_pagetable *iopt);
void iopt_destroy_table(struct io_pagetable *iopt);
int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
		   unsigned long length, struct list_head *pages_list);
void iopt_free_pages_list(struct list_head *pages_list);
enum {
	IOPT_ALLOC_IOVA = 1 << 0,
};
int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, void __user *uptr,
			unsigned long length, int iommu_prot,
			unsigned int flags);
int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, struct file *file,
			unsigned long start, unsigned long length,
			int iommu_prot, unsigned int flags);
int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list,
		   unsigned long length, unsigned long *dst_iova,
		   int iommu_prot, unsigned int flags);
int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
		    unsigned long length, unsigned long *unmapped);
int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);

int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt,
				   struct iommu_domain *domain,
				   unsigned long flags,
				   struct iommu_hwpt_get_dirty_bitmap *bitmap);
int iopt_set_dirty_tracking(struct io_pagetable *iopt,
			    struct iommu_domain *domain, bool enable);

void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
				 unsigned long length);
int iopt_table_add_domain(struct io_pagetable *iopt,
			  struct iommu_domain *domain);
void iopt_table_remove_domain(struct io_pagetable *iopt,
			      struct iommu_domain *domain);
int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
					struct device *dev,
					phys_addr_t *sw_msi_start);
int iopt_set_allow_iova(struct io_pagetable *iopt,
			struct rb_root_cached *allowed_iova);
int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
		      unsigned long last, void *owner);
void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner);
int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas,
		  size_t num_iovas);
void iopt_enable_large_pages(struct io_pagetable *iopt);
int iopt_disable_large_pages(struct io_pagetable *iopt);
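
/*
 * Illustrative sketch only: reserved ranges are keyed by an opaque owner
 * cookie, so a hypothetical caller that wants to block [start, last] from
 * allocation for the lifetime of some object would pair the calls like this:
 *
 *	rc = iopt_reserve_iova(iopt, start, last, owner);
 *	if (rc)
 *		return rc;
 *	// ... later, drop every reservation made with this owner cookie
 *	iopt_remove_reserved_iova(iopt, owner);
 */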

struct iommufd_ucmd {
	struct iommufd_ctx *ictx;
	void __user *ubuffer;
	u32 user_size;
	void *cmd;
	struct iommufd_object *new_obj;
};

int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd,
		       unsigned long arg);

/* Copy the response in ucmd->cmd back to userspace. */
static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,
				       size_t cmd_len)
{
	if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
			 min_t(size_t, ucmd->user_size, cmd_len)))
		return -EFAULT;
	return 0;
}
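
/*
 * Illustrative sketch only: an ioctl handler typically casts ucmd->cmd to its
 * command structure, fills in the output fields and then uses the helper
 * above to copy the result back (hypothetical command struct name):
 *
 *	struct iommu_example_cmd *cmd = ucmd->cmd;
 *
 *	cmd->out_value = ...;
 *	return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 */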

static inline bool iommufd_lock_obj(struct iommufd_object *obj)
{
	if (!refcount_inc_not_zero(&obj->users))
		return false;
	if (!refcount_inc_not_zero(&obj->wait_cnt)) {
		/*
		 * If the caller doesn't already have a ref on obj this must be
		 * called under the xa_lock. Otherwise the caller is holding a
		 * ref on users. Thus it cannot be one before this decrement.
		 */
		refcount_dec(&obj->users);
		return false;
	}
	return true;
}

struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
					  enum iommufd_object_type type);
static inline void iommufd_put_object(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj)
{
	/*
	 * Users first, then wait_cnt so that REMOVE_WAIT never sees a spurious
	 * !0 users with a 0 wait_cnt.
	 */
	refcount_dec(&obj->users);
	if (refcount_dec_and_test(&obj->wait_cnt))
		wake_up_interruptible_all(&ictx->destroy_wait);
}
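
/*
 * Illustrative sketch only: the usual lookup pattern is to take the object
 * with iommufd_get_object(), use it while the elevated users refcount blocks
 * destruction, and then release it with iommufd_put_object():
 *
 *	struct iommufd_object *obj;
 *
 *	obj = iommufd_get_object(ucmd->ictx, id, IOMMUFD_OBJ_IOAS);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	// ... use the object ...
 *	iommufd_put_object(ucmd->ictx, obj);
 */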

void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj);
void iommufd_object_finalize(struct iommufd_ctx *ictx,
			     struct iommufd_object *obj);

enum {
	REMOVE_WAIT = BIT(0),
	REMOVE_OBJ_TOMBSTONE = BIT(1),
};
int iommufd_object_remove(struct iommufd_ctx *ictx,
			  struct iommufd_object *to_destroy, u32 id,
			  unsigned int flags);

/*
 * The caller holds a users refcount and wants to destroy the object. At this
 * point the caller has no wait_cnt reference and at least the xarray will be
 * holding one.
 */
static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
					       struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT);

	/*
	 * If there is a bug and we couldn't destroy the object then we did put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * Similar to iommufd_object_destroy_user(), except that the object ID is left
 * reserved/tombstoned.
 */
static inline void iommufd_object_tombstone_user(struct iommufd_ctx *ictx,
						 struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id,
				    REMOVE_WAIT | REMOVE_OBJ_TOMBSTONE);

	/*
	 * If there is a bug and we couldn't destroy the object then we did put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * The HWPT allocated by autodomains is used in possibly many devices and
 * is automatically destroyed when its refcount reaches zero.
 *
 * If userspace uses the HWPT manually, even for a short term, then it will
 * disrupt this refcounting and the auto-free in the kernel will not work.
 * Userspace that tries to use the automatically allocated HWPT must be careful
 * to ensure that it is consistently destroyed, eg by not racing accesses
 * and by not attaching an automatic HWPT to a device manually.
 */
static inline void
iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
				   struct iommufd_object *obj)
{
	iommufd_object_remove(ictx, obj, obj->id, 0);
}

/*
 * Callers of these normal object allocators must call iommufd_object_finalize()
 * to finalize the object, or call iommufd_object_abort_and_destroy() to revert
 * the allocation.
 */
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type);

#define __iommufd_object_alloc(ictx, ptr, type, obj)                           \
	container_of(_iommufd_object_alloc(                                    \
			     ictx,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc(ictx, ptr, type) \
	__iommufd_object_alloc(ictx, ptr, type, obj)
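
/*
 * Illustrative sketch only of the contract above (hypothetical driver-side
 * allocation; assumes a structure with an embedded 'struct iommufd_object obj'
 * as its first member, as the macro requires):
 *
 *	struct iommufd_ioas *ioas;
 *
 *	ioas = iommufd_object_alloc(ictx, ioas, IOMMUFD_OBJ_IOAS);
 *	if (IS_ERR(ioas))
 *		return ioas;
 *	// ... initialize the object ...
 *	iommufd_object_finalize(ictx, &ioas->obj);	// success path
 *	// on an error path instead:
 *	// iommufd_object_abort_and_destroy(ictx, &ioas->obj);
 */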

/*
 * Callers of these _ucmd allocators should not call iommufd_object_finalize()
 * or iommufd_object_abort_and_destroy(), as the core automatically does that.
 */
struct iommufd_object *
_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd, size_t size,
			   enum iommufd_object_type type);

#define __iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)                      \
	container_of(_iommufd_object_alloc_ucmd(                               \
			     ucmd,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc_ucmd(ucmd, ptr, type) \
	__iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)

/*
 * The IO Address Space (IOAS) pagetable is a virtual page table backed by the
 * io_pagetable object. It is a user controlled mapping of IOVA -> PFNs. The
 * mapping is copied into all of the associated domains and made available to
 * in-kernel users.
 *
 * Every iommu_domain that is created is wrapped in an iommufd_hw_pagetable
 * object. When we go to attach a device to an IOAS we need to get an
 * iommu_domain and wrapping iommufd_hw_pagetable for it.
 *
 * An iommu_domain & iommufd_hw_pagetable will be automatically selected
 * for a device based on the hwpt_list. If no suitable iommu_domain
 * is found a new iommu_domain will be created.
 */
struct iommufd_ioas {
	struct iommufd_object obj;
	struct io_pagetable iopt;
	struct mutex mutex;
	struct list_head hwpt_list;
};

static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ctx *ictx,
						    u32 id)
{
	return container_of(iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS),
			    struct iommufd_ioas, obj);
}

struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx);
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_ioas_destroy(struct iommufd_object *obj);
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd);
int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd);
int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd);
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd);
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd);
int iommufd_ioas_option(struct iommufd_ucmd *ucmd);
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx);

int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd);
int iommufd_check_iova_range(struct io_pagetable *iopt,
			     struct iommu_hwpt_get_dirty_bitmap *bitmap);

/*
 * A HW pagetable is called an iommu_domain inside the kernel. This user object
 * allows directly creating and inspecting the domains. Domains that have kernel
 * owned page tables will be associated with an iommufd_ioas that provides the
 * IOVA to PFN map.
 */
struct iommufd_hw_pagetable {
	struct iommufd_object obj;
	struct iommu_domain *domain;
	struct iommufd_fault *fault;
	bool pasid_compat : 1;
};

struct iommufd_hwpt_paging {
	struct iommufd_hw_pagetable common;
	struct iommufd_ioas *ioas;
	bool auto_domain : 1;
	bool enforce_cache_coherency : 1;
	bool nest_parent : 1;
	/* Head at iommufd_ioas::hwpt_list */
	struct list_head hwpt_item;
	struct iommufd_sw_msi_maps present_sw_msi;
};

struct iommufd_hwpt_nested {
	struct iommufd_hw_pagetable common;
	struct iommufd_hwpt_paging *parent;
	struct iommufd_viommu *viommu;
};

static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt)
{
	return hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING;
}

static inline struct iommufd_hwpt_paging *
to_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_paging, common);
}

static inline struct iommufd_hwpt_nested *
to_hwpt_nested(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_nested, common);
}

static inline struct iommufd_hwpt_paging *
find_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	switch (hwpt->obj.type) {
	case IOMMUFD_OBJ_HWPT_PAGING:
		return to_hwpt_paging(hwpt);
	case IOMMUFD_OBJ_HWPT_NESTED:
		return to_hwpt_nested(hwpt)->parent;
	default:
		return NULL;
	}
}

static inline struct iommufd_hwpt_paging *
iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_PAGING),
			    struct iommufd_hwpt_paging, common.obj);
}

static inline struct iommufd_hw_pagetable *
iommufd_get_hwpt_nested(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_NESTED),
			    struct iommufd_hw_pagetable, obj);
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd);

struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, ioasid_t pasid,
			  u32 flags, bool immediate_attach,
			  const struct iommu_user_data *user_data);
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev, ioasid_t pasid);
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid);
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj);
void iommufd_hwpt_paging_abort(struct iommufd_object *obj);
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj);
void iommufd_hwpt_nested_abort(struct iommufd_object *obj);
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd);

static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
					    struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);

		lockdep_assert_not_held(&hwpt_paging->ioas->mutex);

		if (hwpt_paging->auto_domain) {
			iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
			return;
		}
	}
	refcount_dec(&hwpt->obj.users);
}

struct iommufd_attach;

struct iommufd_group {
	struct kref ref;
	struct mutex lock;
	struct iommufd_ctx *ictx;
	struct iommu_group *group;
	struct xarray pasid_attach;
	struct iommufd_sw_msi_maps required_sw_msi;
	phys_addr_t sw_msi_start;
};

/*
 * An iommufd_device object represents the binding relationship between a
 * consuming driver and the iommufd. These objects are created/destroyed by
 * external drivers, not by userspace.
 */
struct iommufd_device {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_group *igroup;
	struct list_head group_item;
	/* always the physical device */
	struct device *dev;
	bool enforce_cache_coherency;
	struct iommufd_vdevice *vdev;
	bool destroying;
};

static inline struct iommufd_device *
iommufd_get_device(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_DEVICE),
			    struct iommufd_device, obj);
}

void iommufd_device_pre_destroy(struct iommufd_object *obj);
void iommufd_device_destroy(struct iommufd_object *obj);
int iommufd_get_hw_info(struct iommufd_ucmd *ucmd);

struct iommufd_access {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_ioas *ioas;
	struct iommufd_ioas *ioas_unpin;
	struct mutex ioas_lock;
	const struct iommufd_access_ops *ops;
	void *data;
	unsigned long iova_alignment;
	u32 iopt_access_list_id;
};

int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
void iopt_remove_access(struct io_pagetable *iopt,
			struct iommufd_access *access, u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);

/* iommufd_access for internal use */
static inline bool iommufd_access_is_internal(struct iommufd_access *access)
{
	return !access->ictx;
}

struct iommufd_access *iommufd_access_create_internal(struct iommufd_ctx *ictx);

static inline void
iommufd_access_destroy_internal(struct iommufd_ctx *ictx,
				struct iommufd_access *access)
{
	iommufd_object_destroy_user(ictx, &access->obj);
}

int iommufd_access_attach_internal(struct iommufd_access *access,
				   struct iommufd_ioas *ioas);

static inline void iommufd_access_detach_internal(struct iommufd_access *access)
{
	iommufd_access_detach(access);
}
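
/*
 * Illustrative sketch only: an in-kernel user of the internal access helpers
 * above would typically go through the full lifecycle, e.g.:
 *
 *	access = iommufd_access_create_internal(ictx);
 *	if (IS_ERR(access))
 *		return PTR_ERR(access);
 *	rc = iommufd_access_attach_internal(access, ioas);
 *	if (rc) {
 *		iommufd_access_destroy_internal(ictx, access);
 *		return rc;
 *	}
 *	// ... pin/unpin pages through the access ...
 *	iommufd_access_detach_internal(access);
 *	iommufd_access_destroy_internal(ictx, access);
 */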

struct iommufd_eventq {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct file *filep;

	spinlock_t lock; /* protects the deliver list */
	struct list_head deliver;

	struct wait_queue_head wait_queue;
};

struct iommufd_attach_handle {
	struct iommu_attach_handle handle;
	struct iommufd_device *idev;
};

/* Convert an iommu attach handle to iommufd handle. */
#define to_iommufd_handle(hdl) container_of(hdl, struct iommufd_attach_handle, handle)

/*
 * An iommufd_fault object represents an interface to deliver I/O page faults
 * to the user space. These objects are created/destroyed by the user space and
 * associated with hardware page table objects during page-table allocation.
 */
struct iommufd_fault {
	struct iommufd_eventq common;
	struct mutex mutex; /* serializes response flows */
	struct xarray response;
};

static inline struct iommufd_fault *
eventq_to_fault(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_fault, common);
}

static inline struct iommufd_fault *
iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_FAULT),
			    struct iommufd_fault, common.obj);
}

int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
				  struct iommufd_attach_handle *handle);

/* An iommufd_vevent represents a vIOMMU event in an iommufd_veventq */
struct iommufd_vevent {
	struct iommufd_vevent_header header;
	struct list_head node; /* for iommufd_eventq::deliver */
	ssize_t data_len;
	u64 event_data[] __counted_by(data_len);
};

#define vevent_for_lost_events_header(vevent) \
	(vevent->header.flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS)

/*
 * An iommufd_veventq object represents an interface to deliver vIOMMU events to
 * the user space. It is created/destroyed by the user space and associated with
 * a vIOMMU object during the allocations.
 */
struct iommufd_veventq {
	struct iommufd_eventq common;
	struct iommufd_viommu *viommu;
	struct list_head node; /* for iommufd_viommu::veventqs */
	struct iommufd_vevent lost_events_header;

	enum iommu_veventq_type type;
	unsigned int depth;

	/* Use common.lock for protection */
	u32 num_events;
	u32 sequence;
};

static inline struct iommufd_veventq *
eventq_to_veventq(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_veventq, common);
}

static inline struct iommufd_veventq *
iommufd_get_veventq(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VEVENTQ),
			    struct iommufd_veventq, common.obj);
}

int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd);
void iommufd_veventq_destroy(struct iommufd_object *obj);
void iommufd_veventq_abort(struct iommufd_object *obj);

static inline void iommufd_vevent_handler(struct iommufd_veventq *veventq,
					  struct iommufd_vevent *vevent)
{
	struct iommufd_eventq *eventq = &veventq->common;

	lockdep_assert_held(&eventq->lock);

	/*
	 * Remove the lost_events_header and add the new node at the same time.
	 * Note the new node can be lost_events_header, for a sequence update.
	 */
	if (list_is_last(&veventq->lost_events_header.node, &eventq->deliver))
		list_del(&veventq->lost_events_header.node);
	list_add_tail(&vevent->node, &eventq->deliver);
	vevent->header.sequence = veventq->sequence;
	veventq->sequence = (veventq->sequence + 1) & INT_MAX;

	wake_up_interruptible(&eventq->wait_queue);
}
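
/*
 * Illustrative sketch only: per the lockdep assertion above, a hypothetical
 * producer queues an event while holding the eventq spinlock:
 *
 *	spin_lock(&veventq->common.lock);
 *	iommufd_vevent_handler(veventq, vevent);
 *	spin_unlock(&veventq->common.lock);
 */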

static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VIOMMU),
			    struct iommufd_viommu, obj);
}

static inline struct iommufd_veventq *
iommufd_viommu_find_veventq(struct iommufd_viommu *viommu,
			    enum iommu_veventq_type type)
{
	struct iommufd_veventq *veventq, *next;

	lockdep_assert_held(&viommu->veventqs_rwsem);

	list_for_each_entry_safe(veventq, next, &viommu->veventqs, node) {
		if (veventq->type == type)
			return veventq;
	}
	return NULL;
}
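
/*
 * Illustrative sketch only: as the lockdep assertion above requires, callers
 * hold viommu->veventqs_rwsem around the lookup, e.g.:
 *
 *	down_read(&viommu->veventqs_rwsem);
 *	veventq = iommufd_viommu_find_veventq(viommu, type);
 *	// ... use veventq while the rwsem is held ...
 *	up_read(&viommu->veventqs_rwsem);
 */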

int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_viommu_destroy(struct iommufd_object *obj);
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_vdevice_destroy(struct iommufd_object *obj);
void iommufd_vdevice_abort(struct iommufd_object *obj);
int iommufd_hw_queue_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_hw_queue_destroy(struct iommufd_object *obj);

static inline struct iommufd_vdevice *
iommufd_get_vdevice(struct iommufd_ctx *ictx, u32 id)
{
	return container_of(iommufd_get_object(ictx, id,
					       IOMMUFD_OBJ_VDEVICE),
			    struct iommufd_vdevice, obj);
}

#ifdef CONFIG_IOMMUFD_TEST
int iommufd_test(struct iommufd_ucmd *ucmd);
void iommufd_selftest_destroy(struct iommufd_object *obj);
extern size_t iommufd_test_memory_limit;
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags);
bool iommufd_should_fail(void);
int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
						 unsigned int ioas_id,
						 u64 *iova, u32 *flags)
{
}
static inline bool iommufd_should_fail(void)
{
	return false;
}
static inline int __init iommufd_test_init(void)
{
	return 0;
}
static inline void iommufd_test_exit(void)
{
}
static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return false;
}
#endif
#endif