// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->domain)
		iommu_domain_free(hwpt->domain);

	if (hwpt->fault)
		refcount_dec(&hwpt->fault->common.obj.users);
}

void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	__iommufd_hwpt_destroy(&hwpt_paging->common);
	refcount_dec(&hwpt_paging->ioas->obj.users);
}

void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	__iommufd_hwpt_destroy(&hwpt_nested->common);
	if (hwpt_nested->viommu)
		refcount_dec(&hwpt_nested->viommu->obj.users);
	else
		refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @pasid: PASID to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, ioasid_t pasid,
			  u32 flags, bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
				IOMMU_HWPT_FAULT_ID_VALID |
				IOMMU_HWPT_ALLOC_PASID;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_paging_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
	    !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & IOMMU_HWPT_FAULT_ID_VALID) &&
	    (flags & IOMMU_HWPT_ALLOC_NEST_PARENT))
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;
	hwpt->pasid_compat = flags & IOMMU_HWPT_ALLOC_PASID;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with iommufd_hw_pagetable_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_paging_flags) {
		hwpt->domain = ops->domain_alloc_paging_flags(idev->dev,
				flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
		hwpt->domain->owner = ops;
	} else {
		hwpt->domain = iommu_paging_domain_alloc(idev->dev);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
	}
	hwpt->domain->iommufd_hwpt = hwpt;
	hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency on
	 * a new domain.
	 *
	 * The cache coherency mode must be configured here and unchanged later.
	 * Note that a HWPT (non-CC) created for a device (non-CC) can be later
	 * reused by another device (either non-CC or CC). However, a HWPT (CC)
	 * created for a device (CC) cannot be reused by another device (non-CC)
	 * but only by devices (CC). Instead user space in this case would need
	 * to allocate a separate HWPT (non-CC).
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that
	 * cannot directly allocate a domain. These drivers do not finish
	 * creating the domain until attach is completed. Thus we must have
	 * this call sequence. Once those drivers are fixed this should be
	 * removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev, pasid);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev, pasid);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}
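
/*
 * Example (illustrative only): a minimal sketch of the calling convention
 * for iommufd_hwpt_paging_alloc(), mirroring what iommufd_hwpt_alloc()
 * below does for the IOAS case. The ictx/ioas/idev variables are assumed
 * to come from the caller's context.
 *
 *	mutex_lock(&ioas->mutex);
 *	hwpt_paging = iommufd_hwpt_paging_alloc(ictx, ioas, idev,
 *						IOMMU_NO_PASID, 0, false,
 *						NULL);
 *	if (IS_ERR(hwpt_paging)) {
 *		mutex_unlock(&ioas->mutex);
 *		return PTR_ERR(hwpt_paging);
 *	}
 *	iommufd_object_finalize(ictx, &hwpt_paging->common.obj);
 *	mutex_unlock(&ioas->mutex);
 */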

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if ((flags & ~(IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID)) ||
	    !user_data->len || !ops->domain_alloc_nested)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent ||
	    parent->common.domain->owner != ops)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;
	hwpt->pasid_compat = flags & IOMMU_HWPT_ALLOC_PASID;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_nested(
		idev->dev, parent->common.domain,
		flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->owner = ops;
	hwpt->domain->iommufd_hwpt = hwpt;
	hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EOPNOTSUPP;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_viommu_alloc_hwpt_nested() - Get a hwpt_nested for a vIOMMU
 * @viommu: vIOMMU object to associate the hwpt_nested/domain with
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new IOMMU_DOMAIN_NESTED for a vIOMMU and return it as a NESTED
 * hw_pagetable.
 */
static struct iommufd_hwpt_nested *
iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
				 const struct iommu_user_data *user_data)
{
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if (flags & ~(IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID))
		return ERR_PTR(-EOPNOTSUPP);
	if (!user_data->len)
		return ERR_PTR(-EOPNOTSUPP);
	if (!viommu->ops || !viommu->ops->alloc_domain_nested)
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_nested = __iommufd_object_alloc(
		viommu->ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;
	hwpt->pasid_compat = flags & IOMMU_HWPT_ALLOC_PASID;

	hwpt_nested->viommu = viommu;
	refcount_inc(&viommu->obj.users);
	hwpt_nested->parent = viommu->hwpt;

	hwpt->domain = viommu->ops->alloc_domain_nested(
		viommu, flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}
	hwpt->domain->iommufd_hwpt = hwpt;
	hwpt->domain->owner = viommu->iommu_dev->ops;
	hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EOPNOTSUPP;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(viommu->ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
	    (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, IOMMU_NO_PASID, cmd->flags,
			false, user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
		struct iommufd_hwpt_nested *hwpt_nested;
		struct iommufd_viommu *viommu;

		viommu = container_of(pt_obj, struct iommufd_viommu, obj);
		if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) {
			rc = -EINVAL;
			goto out_unlock;
		}
		hwpt_nested = iommufd_viommu_alloc_hwpt_nested(
			viommu, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	if (cmd->flags & IOMMU_HWPT_FAULT_ID_VALID) {
		struct iommufd_fault *fault;

		fault = iommufd_get_fault(ucmd, cmd->fault_id);
		if (IS_ERR(fault)) {
			rc = PTR_ERR(fault);
			goto out_hwpt;
		}
		hwpt->fault = fault;
		hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
		refcount_inc(&fault->common.obj.users);
		iommufd_put_object(ucmd->ictx, &fault->common.obj);
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}
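
/*
 * Example (illustrative only): a minimal userspace sketch of the
 * IOMMU_HWPT_ALLOC ioctl serviced by iommufd_hwpt_alloc() above,
 * allocating a kernel-managed PAGING hwpt from an IOAS. The iommufd
 * fd and the dev_id/ioas_id object ids are assumed to already exist.
 *
 *	struct iommu_hwpt_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.dev_id = dev_id,
 *		.pt_id = ioas_id,
 *		.data_type = IOMMU_HWPT_DATA_NONE,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
 *		return -errno;
 *	hwpt_id = cmd.out_hwpt_id;
 */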

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}

int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
	return rc;
}
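
/*
 * Example (illustrative only): a minimal userspace sketch of the dirty
 * tracking flow served by the two handlers above. The iommufd fd,
 * hwpt_id, bitmap buffer, and IOVA range are assumed to come from the
 * caller.
 *
 *	struct iommu_hwpt_set_dirty_tracking set = {
 *		.size = sizeof(set),
 *		.flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,
 *		.hwpt_id = hwpt_id,
 *	};
 *	struct iommu_hwpt_get_dirty_bitmap get = {
 *		.size = sizeof(get),
 *		.hwpt_id = hwpt_id,
 *		.iova = iova,
 *		.length = length,
 *		.page_size = 4096,
 *		.data = (uintptr_t)bitmap,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set))
 *		return -errno;
 *	if (ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get))
 *		return -errno;
 */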

int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
	struct iommu_user_data_array data_array = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.entry_len = cmd->entry_len,
		.entry_num = cmd->entry_num,
	};
	struct iommufd_object *pt_obj;
	u32 done_num = 0;
	int rc;

	if (cmd->__reserved) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
		rc = -EINVAL;
		goto out;
	}

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->hwpt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = PTR_ERR(pt_obj);
		goto out;
	}
	if (pt_obj->type == IOMMUFD_OBJ_HWPT_NESTED) {
		struct iommufd_hw_pagetable *hwpt =
			container_of(pt_obj, struct iommufd_hw_pagetable, obj);

		if (!hwpt->domain->ops ||
		    !hwpt->domain->ops->cache_invalidate_user) {
			rc = -EOPNOTSUPP;
			goto out_put_pt;
		}
		rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
							      &data_array);
	} else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
		struct iommufd_viommu *viommu =
			container_of(pt_obj, struct iommufd_viommu, obj);

		if (!viommu->ops || !viommu->ops->cache_invalidate) {
			rc = -EOPNOTSUPP;
			goto out_put_pt;
		}
		rc = viommu->ops->cache_invalidate(viommu, &data_array);
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	done_num = data_array.entry_num;

out_put_pt:
	iommufd_put_object(ucmd->ictx, pt_obj);
out:
	cmd->entry_num = done_num;
	if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
		return -EFAULT;
	return rc;
}
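
/*
 * Example (illustrative only): a minimal userspace sketch of the
 * IOMMU_HWPT_INVALIDATE ioctl served above. The entry layout is driver
 * specific; IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 is used here purely as an
 * example and the nested hwpt_id is assumed to exist.
 *
 *	struct iommu_hwpt_vtd_s1_invalidate entry = {
 *		.addr = 0,
 *		.npages = UINT64_MAX,	// whole address space
 *	};
 *	struct iommu_hwpt_invalidate cmd = {
 *		.size = sizeof(cmd),
 *		.hwpt_id = nested_hwpt_id,
 *		.data_uptr = (uintptr_t)&entry,
 *		.data_type = IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
 *		.entry_len = sizeof(entry),
 *		.entry_num = 1,
 *	};
 *
 *	if (ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd))
 *		return -errno;
 *	// cmd.entry_num now holds the number of entries processed
 */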