vfio/pci: Do vf_token checks for VFIO_DEVICE_BIND_IOMMUFD

This was missed during the initial implementation. The VFIO PCI driver
encodes the vf_token inside the device name when the device is opened
through the group FD, something like:

  "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"

This is used to control access to a VF unless there is co-ordination with
the owner of the PF.

Since we no longer have a device name in the cdev path, pass the token
directly through VFIO_DEVICE_BIND_IOMMUFD using an optional field
indicated by VFIO_DEVICE_BIND_FLAG_TOKEN.
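
For illustration, a minimal userspace sketch of the new bind path could
look like the following; the helper name and fd handling are assumptions,
only the struct layout, the flag and the ioctl come from the uAPI change
below:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/types.h>
  #include <linux/vfio.h>

  /* Illustrative only: token_uuid is 16 bytes, or NULL if no token is needed */
  static int bind_cdev_to_iommufd(int device_fd, int iommufd,
                                  const __u8 *token_uuid)
  {
          struct vfio_device_bind_iommufd bind = {
                  .argsz = sizeof(bind),
                  .iommufd = iommufd,
          };

          if (token_uuid) {
                  bind.flags = VFIO_DEVICE_BIND_FLAG_TOKEN;
                  bind.token_uuid_ptr = (uintptr_t)token_uuid;
          }

          if (ioctl(device_fd, VFIO_DEVICE_BIND_IOMMUFD, &bind))
                  return -1;

          /* bind.out_devid now identifies this device in IOMMUFD commands */
          return 0;
  }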

Fixes: 5fcc26969a ("vfio: Add VFIO_DEVICE_BIND_IOMMUFD")
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/0-v3-bdd8716e85fe+3978a-vfio_token_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>

@@ -60,22 +60,50 @@ static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
 	spin_unlock(&df->kvm_ref_lock);
 }
 
+static int vfio_df_check_token(struct vfio_device *device,
+			       const struct vfio_device_bind_iommufd *bind)
+{
+	uuid_t uuid;
+
+	if (!device->ops->match_token_uuid) {
+		if (bind->flags & VFIO_DEVICE_BIND_FLAG_TOKEN)
+			return -EINVAL;
+		return 0;
+	}
+
+	if (!(bind->flags & VFIO_DEVICE_BIND_FLAG_TOKEN))
+		return device->ops->match_token_uuid(device, NULL);
+
+	if (copy_from_user(&uuid, u64_to_user_ptr(bind->token_uuid_ptr),
+			   sizeof(uuid)))
+		return -EFAULT;
+	return device->ops->match_token_uuid(device, &uuid);
+}
+
 long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
 				struct vfio_device_bind_iommufd __user *arg)
 {
+	const u32 VALID_FLAGS = VFIO_DEVICE_BIND_FLAG_TOKEN;
 	struct vfio_device *device = df->device;
 	struct vfio_device_bind_iommufd bind;
 	unsigned long minsz;
+	u32 user_size;
 	int ret;
 
 	static_assert(__same_type(arg->out_devid, df->devid));
 
 	minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);
 
-	if (copy_from_user(&bind, arg, minsz))
-		return -EFAULT;
+	ret = get_user(user_size, &arg->argsz);
+	if (ret)
+		return ret;
+	if (user_size < minsz)
+		return -EINVAL;
+	ret = copy_struct_from_user(&bind, minsz, arg, user_size);
+	if (ret)
+		return ret;
 
-	if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
+	if (bind.iommufd < 0 || bind.flags & ~VALID_FLAGS)
 		return -EINVAL;
 
 	/* BIND_IOMMUFD only allowed for cdev fds */
@@ -93,6 +121,10 @@ long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
 		goto out_unlock;
 	}
 
+	ret = vfio_df_check_token(device, &bind);
+	if (ret)
+		goto out_unlock;
+
 	df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
 	if (IS_ERR(df->iommufd)) {
 		ret = PTR_ERR(df->iommufd);


@@ -1583,6 +1583,7 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
 	.mmap = vfio_pci_core_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,


@@ -1372,6 +1372,7 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
 	.mmap = vfio_pci_core_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,


@@ -696,6 +696,7 @@ static const struct vfio_device_ops nvgrace_gpu_pci_ops = {
 	.mmap = nvgrace_gpu_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,
@@ -715,6 +716,7 @@ static const struct vfio_device_ops nvgrace_gpu_pci_core_ops = {
 	.mmap = vfio_pci_core_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,


@@ -201,6 +201,7 @@ static const struct vfio_device_ops pds_vfio_ops = {
 	.mmap = vfio_pci_core_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,


@@ -614,6 +614,7 @@ static const struct vfio_device_ops qat_vf_pci_ops = {
 	.mmap = vfio_pci_core_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,


@@ -138,6 +138,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
 	.mmap = vfio_pci_core_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,


@@ -1821,9 +1821,13 @@ void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_request);
 
-static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
-				      bool vf_token, uuid_t *uuid)
+int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
+				   const uuid_t *uuid)
 {
+	struct vfio_pci_core_device *vdev =
+		container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
 	/*
 	 * There's always some degree of trust or collaboration between SR-IOV
 	 * PF and VFs, even if just that the PF hosts the SR-IOV capability and
@@ -1854,7 +1858,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
 		bool match;
 
 		if (!pf_vdev) {
-			if (!vf_token)
+			if (!uuid)
 				return 0; /* PF is not vfio-pci, no VF token */
 
 			pci_info_ratelimited(vdev->pdev,
@@ -1862,7 +1866,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
 			return -EINVAL;
 		}
 
-		if (!vf_token) {
+		if (!uuid) {
 			pci_info_ratelimited(vdev->pdev,
 					     "VF token required to access device\n");
 			return -EACCES;
@@ -1880,7 +1884,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
 	} else if (vdev->vf_token) {
 		mutex_lock(&vdev->vf_token->lock);
 		if (vdev->vf_token->users) {
-			if (!vf_token) {
+			if (!uuid) {
 				mutex_unlock(&vdev->vf_token->lock);
 				pci_info_ratelimited(vdev->pdev,
 						     "VF token required to access device\n");
@@ -1893,12 +1897,12 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
 						     "Incorrect VF token provided for device\n");
 				return -EACCES;
 			}
-		} else if (vf_token) {
+		} else if (uuid) {
 			uuid_copy(&vdev->vf_token->uuid, uuid);
 		}
 
 		mutex_unlock(&vdev->vf_token->lock);
-	} else if (vf_token) {
+	} else if (uuid) {
 		pci_info_ratelimited(vdev->pdev,
 				     "VF token incorrectly provided, not a PF or VF\n");
 		return -EINVAL;
@@ -1906,6 +1910,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vfio_pci_core_match_token_uuid);
 
 #define VF_TOKEN_ARG "vf_token="
@@ -1952,7 +1957,8 @@ int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
 		}
 	}
 
-	ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
+	ret = core_vdev->ops->match_token_uuid(core_vdev,
+					       vf_token ? &uuid : NULL);
 	if (ret)
 		return ret;


@@ -94,6 +94,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_lm_ops = {
 	.mmap = vfio_pci_core_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,
@@ -114,6 +115,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_tran_lm_ops = {
 	.mmap = vfio_pci_core_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,
@@ -134,6 +136,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_ops = {
 	.mmap = vfio_pci_core_mmap,
 	.request = vfio_pci_core_request,
 	.match = vfio_pci_core_match,
+	.match_token_uuid = vfio_pci_core_match_token_uuid,
 	.bind_iommufd = vfio_iommufd_physical_bind,
 	.unbind_iommufd = vfio_iommufd_physical_unbind,
 	.attach_ioas = vfio_iommufd_physical_attach_ioas,


@@ -105,6 +105,9 @@ struct vfio_device {
  * @match: Optional device name match callback (return: 0 for no-match, >0 for
  *         match, -errno for abort (ex. match with insufficient or incorrect
  *         additional args)
+ * @match_token_uuid: Optional device token match/validation. Return 0
+ *         if the uuid is valid for the device, -errno otherwise. uuid is NULL
+ *         if none was provided.
  * @dma_unmap: Called when userspace unmaps IOVA from the container
  *             this device is attached to.
  * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
@@ -132,6 +135,7 @@ struct vfio_device_ops {
 	int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
 	void (*request)(struct vfio_device *vdev, unsigned int count);
 	int (*match)(struct vfio_device *vdev, char *buf);
+	int (*match_token_uuid)(struct vfio_device *vdev, const uuid_t *uuid);
 	void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
 	int (*device_feature)(struct vfio_device *device, u32 flags,
 			      void __user *arg, size_t argsz);


@@ -122,6 +122,8 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu
 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
 void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
 int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
+int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
+				   const uuid_t *uuid);
 int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
 void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
 void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);


@@ -905,10 +905,12 @@ struct vfio_device_feature {
  * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
  *                                 struct vfio_device_bind_iommufd)
  * @argsz: User filled size of this data.
- * @flags: Must be 0.
+ * @flags: Must be 0 or a bit flags of VFIO_DEVICE_BIND_*
  * @iommufd: iommufd to bind.
  * @out_devid: The device id generated by this bind. devid is a handle for
  *             this device/iommufd bond and can be used in IOMMUFD commands.
+ * @token_uuid_ptr: Valid if VFIO_DEVICE_BIND_FLAG_TOKEN. Points to a 16 byte
+ *             UUID in the same format as VFIO_DEVICE_FEATURE_PCI_VF_TOKEN.
  *
  * Bind a vfio_device to the specified iommufd.
  *
@@ -917,13 +919,21 @@ struct vfio_device_feature {
  *
  * Unbind is automatically conducted when device fd is closed.
  *
+ * A token is sometimes required to open the device, unless this is known to be
+ * needed VFIO_DEVICE_BIND_FLAG_TOKEN should not be set and token_uuid_ptr is
+ * ignored. The only case today is a PF/VF relationship where the VF bind must
+ * be provided the same token as VFIO_DEVICE_FEATURE_PCI_VF_TOKEN provided to
+ * the PF.
+ *
  * Return: 0 on success, -errno on failure.
  */
 struct vfio_device_bind_iommufd {
 	__u32 argsz;
 	__u32 flags;
+#define VFIO_DEVICE_BIND_FLAG_TOKEN (1 << 0)
 	__s32 iommufd;
 	__u32 out_devid;
+	__aligned_u64 token_uuid_ptr;
 };
 
 #define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18)
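
For context, the PF side of the relationship described in the comment above
would typically have programmed the token with the pre-existing
VFIO_DEVICE_FEATURE_PCI_VF_TOKEN feature. A rough sketch, assuming a
vfio-pci PF device fd; the helper name and buffer handling are not part of
this patch:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/types.h>
  #include <linux/vfio.h>

  static int set_pf_vf_token(int pf_device_fd, const __u8 *token_uuid)
  {
          /* struct vfio_device_feature carries the 16-byte UUID in data[] */
          __u64 buf[(sizeof(struct vfio_device_feature) + 16 + 7) / 8] = {};
          struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;

          feature->argsz = sizeof(struct vfio_device_feature) + 16;
          feature->flags = VFIO_DEVICE_FEATURE_SET |
                           VFIO_DEVICE_FEATURE_PCI_VF_TOKEN;
          memcpy(feature->data, token_uuid, 16);

          return ioctl(pf_device_fd, VFIO_DEVICE_FEATURE, feature);
  }

The same 16 bytes are then what a VF cdev bind supplies through
VFIO_DEVICE_BIND_FLAG_TOKEN.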