From 8bd2fa086a04886798b505f28db4002525895203 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Thu, 8 Aug 2024 10:51:41 +0300
Subject: [PATCH 01/17] virtio: break and reset virtio devices on device_shutdown()

Hongyu reported a hang on kexec in a VM. QEMU reported invalid memory
accesses during the hang.

Invalid read at addr 0x102877002, size 2, region '(null)', reason: rejected
Invalid write at addr 0x102877A44, size 2, region '(null)', reason: rejected
...

It was traced down to virtio-console. Kexec works fine if virtio-console
is not in use.

The issue is that virtio-console continues to write to the MMIO even after
the underlying virtio-pci device is reset.

Additionally, Eric noticed that IOMMUs are reset before devices; if devices
are not reset on shutdown, they continue to poke at guest memory and get
errors from the IOMMU. Some devices get wedged then.

The problem can be solved by breaking all virtio devices on virtio bus
shutdown, then resetting them.

Reported-by: Eric Auger
Reported-by: Hongyu Ning
Message-ID:
Tested-by: Eric Auger
Acked-by: Jason Wang
Signed-off-by: Michael S. Tsirkin
---
 drivers/virtio/virtio.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index ba37665188b5..150753c3b578 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -395,6 +395,34 @@ static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
 	return dev->config->get_vq_affinity(dev, irq_vec);
 }
 
+static void virtio_dev_shutdown(struct device *_d)
+{
+	struct virtio_device *dev = dev_to_virtio(_d);
+	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+	/*
+	 * Stop accesses to or from the device.
+	 * We only need to do it if there's a driver - no accesses otherwise.
+	 */
+	if (!drv)
+		return;
+
+	/*
+	 * Some devices get wedged if you kick them after they are
+	 * reset. Mark all vqs as broken to make sure we don't.
+	 */
+	virtio_break_device(dev);
+	/*
+	 * Guarantee that any callback will see vq->broken as true.
+	 */
+	virtio_synchronize_cbs(dev);
+	/*
+	 * As IOMMUs are reset on shutdown, this will block device access to
+	 * memory. Some devices get wedged if this happens, so reset to make
+	 * sure it does not.
+	 */
+	dev->config->reset(dev);
+}
+
 static const struct bus_type virtio_bus = {
 	.name  = "virtio",
 	.match = virtio_dev_match,
@@ -403,6 +431,7 @@ static const struct bus_type virtio_bus = {
 	.probe  = virtio_dev_probe,
 	.remove = virtio_dev_remove,
 	.irq_get_affinity = virtio_irq_get_affinity,
+	.shutdown = virtio_dev_shutdown,
 };
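For orientation (not part of the series): the break/synchronize pair above gives
drivers a simple contract — after virtio_break_device() plus
virtio_synchronize_cbs(), every subsequent vring callback is guaranteed to see
the broken state. A minimal sketch of a driver-side callback honoring that
contract; "my_drv" is a hypothetical driver, virtqueue_is_broken() is the real
virtio core helper:

	/* Hypothetical driver vq callback: stop touching the device once broken. */
	static void my_drv_vq_callback(struct virtqueue *vq)
	{
		struct my_drv *md = vq->vdev->priv;

		if (virtqueue_is_broken(vq))
			return;	/* shutdown in progress: no more MMIO or DMA */

		schedule_work(&md->rx_work);
	}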
From ae376910f52b815003d433243bee93ca000537c0 Mon Sep 17 00:00:00 2001
From: Yufeng Wang
Date: Mon, 13 Jan 2025 18:03:00 +0800
Subject: [PATCH 02/17] tools/virtio: Add DMA_MAPPING_ERROR and sg_dma_len() defines for virtio test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When building tools/virtio, we hit the errors below:

cc -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h -mfunction-return=thunk -fcf-protection=none -mindirect-branch-register -pthread -c -o virtio_ring.o ../../drivers/virtio/virtio_ring.c

../../drivers/virtio/virtio_ring.c: In function 'vring_need_unmap_buffer':
../../drivers/virtio/virtio_ring.c:294:54: error: 'DMA_MAPPING_ERROR' undeclared (first use in this function)
  294 |         return vring->use_dma_api && (extra->addr != DMA_MAPPING_ERROR);
      |                                                      ^~~~~~~~~~~~~~~~~
../../drivers/virtio/virtio_ring.c:294:54: note: each undeclared identifier is reported only once for each function it appears in
../../drivers/virtio/virtio_ring.c: In function 'vring_map_one_sg':
../../drivers/virtio/virtio_ring.c:369:24: error: implicit declaration of function 'sg_dma_len' [-Wimplicit-function-declaration]
  369 |                 *len = sg_dma_len(sg);
      |                        ^~~~~~~~~~
../../drivers/virtio/virtio_ring.c: In function 'virtqueue_add_desc_split':
../../drivers/virtio/virtio_ring.c:518:37: error: 'DMA_MAPPING_ERROR' undeclared (first use in this function)
  518 |         extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
      |                                     ^~~~~~~~~~~~~~~~~
../../drivers/virtio/virtio_ring.c: In function 'virtqueue_add_indirect_packed':
../../drivers/virtio/virtio_ring.c:1370:61: error: 'DMA_MAPPING_ERROR' undeclared (first use in this function)
 1370 |         extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
      |                                     ^~~~~~~~~~~~~~~~~
../../drivers/virtio/virtio_ring.c: In function 'virtqueue_add_packed':
../../drivers/virtio/virtio_ring.c:1535:41: error: 'DMA_MAPPING_ERROR' undeclared (first use in this function)
 1535 |                 DMA_MAPPING_ERROR : addr;
      |                 ^~~~~~~~~~~~~~~~~

To fix this, add the DMA_MAPPING_ERROR and sg_dma_len() defines for the
virtio test build.

Fixes: c7e1b422afac ("virtio_ring: perform premapped operations based on per-buffer")
Signed-off-by: Yufeng Wang
Message-Id: <20250113100300.174382-1-wangyufeng@kylinos.cn>
Signed-off-by: Michael S. Tsirkin
---
 tools/virtio/linux/dma-mapping.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h
index 822ecaa8e4df..095958461788 100644
--- a/tools/virtio/linux/dma-mapping.h
+++ b/tools/virtio/linux/dma-mapping.h
@@ -31,6 +31,7 @@ enum dma_data_direction {
 #define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
 
 #define sg_dma_address(sg) (0)
+#define sg_dma_len(sg) (0)
 #define dma_need_sync(v, a) (0)
 #define dma_unmap_single_attrs(d, a, s, r, t) do { \
 	(void)(d); (void)(a); (void)(s); (void)(r); (void)(t); \
@@ -43,4 +44,16 @@ enum dma_data_direction {
 } while (0)
 #define dma_max_mapping_size(...) SIZE_MAX
 
+/*
+ * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
+ * be given to a device to use as a DMA source or target. It is specific to a
+ * given device and there may be a translation between the CPU physical address
+ * space and the bus address space.
+ *
+ * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
+ * be used directly in drivers, but checked for using dma_mapping_error()
+ * instead.
+ */
+#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
+
 #endif
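For reference (not part of the patch): the sentinel mirrors how the real DMA
API reports mapping failures, which drivers check via dma_mapping_error()
rather than comparing against the constant directly. A minimal sketch of that
pattern:

	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))	/* true when addr == DMA_MAPPING_ERROR */
		return -ENOMEM;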
From 83dc0370f915b9ad996387c141399615627e32b6 Mon Sep 17 00:00:00 2001
From: Yufeng Wang
Date: Tue, 14 Jan 2025 11:36:35 +0800
Subject: [PATCH 03/17] tools: virtio/linux/compiler.h: Add data_race() define
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Port over the definition of data_race() so we can build tools/virtio:

cc -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h -mfunction-return=thunk -fcf-protection=none -mindirect-branch-register -pthread -c -o virtio_ring.o ../../drivers/virtio/virtio_ring.c

../../drivers/virtio/virtio_ring.c: In function 'vring_interrupt':
../../drivers/virtio/virtio_ring.c:2711:17: error: implicit declaration of function 'data_race' [-Wimplicit-function-declaration]
 2711 |                 data_race(vq->event_triggered = true);
      |                 ^~~~~~~~~

Signed-off-by: Yufeng Wang
Message-Id: <20250114033635.20623-1-wangyufeng@kylinos.cn>
Signed-off-by: Michael S. Tsirkin
---
 tools/virtio/linux/compiler.h | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h
index 1f3a15b954b9..204ef0e9f542 100644
--- a/tools/virtio/linux/compiler.h
+++ b/tools/virtio/linux/compiler.h
@@ -10,4 +10,29 @@
 #define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
 
 #define __aligned(x) __attribute((__aligned__(x)))
+
+/**
+ * data_race - mark an expression as containing intentional data races
+ *
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
+ * For example, if accesses to a given variable are protected by a lock,
+ * except for diagnostic code, then the accesses under the lock should
+ * be plain C-language accesses and those in the diagnostic code should
+ * use data_race(). This way, KCSAN will complain if buggy lockless
+ * accesses to that variable are introduced, even if the buggy accesses
+ * are protected by READ_ONCE() or WRITE_ONCE().
+ *
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored. If the access must
+ * be atomic *and* KCSAN should ignore the access, use both data_race()
+ * and READ_ONCE(), for example, data_race(READ_ONCE(x)).
+ */
+#define data_race(expr)							\
+({									\
+	__auto_type __v = (expr);					\
+	__v;								\
+})
+
 #endif
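As a usage reminder (illustrative, per the macro's own documentation): when a
racy diagnostic access must also be single-copy atomic, the two annotations
combine, e.g.:

	/* Tolerated race: diagnostic read outside the lock, KCSAN stays quiet. */
	unsigned int free = data_race(READ_ONCE(vq->num_free));
	pr_debug("roughly %u descriptors free\n", free);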
From ec05544c858fef45bc00738c2ced196ed91be611 Mon Sep 17 00:00:00 2001
From: Yufeng Wang
Date: Tue, 14 Jan 2025 14:47:26 +0800
Subject: [PATCH 04/17] tools: virtio/linux/module.h: add MODULE_DESCRIPTION() define
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When building tools/virtio, we hit the error below:

cc -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h -mfunction-return=thunk -fcf-protection=none -mindirect-branch-register -pthread -c -o virtio_ring.o ../../drivers/virtio/virtio_ring.c

../../drivers/virtio/virtio_ring.c:3276:20: error: expected declaration specifiers or '...' before string constant
 3276 | MODULE_DESCRIPTION("Virtio ring implementation");
      |                    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~

To fix this, add a MODULE_DESCRIPTION() define for the virtio test build.

Fixes: ab0727f3ddb8 ("virtio: add missing MODULE_DESCRIPTION() macros")
Signed-off-by: Yufeng Wang
Message-Id: <20250114064726.33079-1-wangyufeng@kylinos.cn>
Signed-off-by: Michael S. Tsirkin
---
 tools/virtio/linux/module.h | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/tools/virtio/linux/module.h b/tools/virtio/linux/module.h
index 9dfa96fea2b2..b91681fc1571 100644
--- a/tools/virtio/linux/module.h
+++ b/tools/virtio/linux/module.h
@@ -5,3 +5,10 @@
 	static __attribute__((unused)) const char *__MODULE_LICENSE_name = \
 	__MODULE_LICENSE_value
 
+#ifndef MODULE_AUTHOR
+#define MODULE_AUTHOR(x)
+#endif
+
+#ifndef MODULE_DESCRIPTION
+#define MODULE_DESCRIPTION(x)
+#endif
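With the stub in place, the line that previously broke the build simply
preprocesses away in the tools build (illustrative):

	/* Kernel source, unchanged: */
	MODULE_DESCRIPTION("Virtio ring implementation");
	/* After preprocessing with the stub above: expands to nothing. */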
From 5dd639a1646ef5fe8f4bf270fad47c5c3755b9b6 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Wed, 29 Jan 2025 15:09:22 -0600
Subject: [PATCH 05/17] vhost-scsi: Fix handling of multiple calls to vhost_scsi_set_endpoint

If vhost_scsi_set_endpoint is called multiple times without a
vhost_scsi_clear_endpoint between them, we can hit multiple bugs
found by Haoran Zhang:

1. Use-after-free when no tpgs are found:

This fixes a use after free that occurs when vhost_scsi_set_endpoint is
called more than once and calls after the first call do not find any
tpgs to add to the vs_tpg. When vhost_scsi_set_endpoint first finds
tpgs to add to the vs_tpg array, match=true, so we will do:

vhost_vq_set_backend(vq, vs_tpg);
...
kfree(vs->vs_tpg);
vs->vs_tpg = vs_tpg;

If vhost_scsi_set_endpoint is called again and no tpgs are found,
match=false, so we skip the vhost_vq_set_backend call, leaving the
pointer to the vs_tpg we then free via:

kfree(vs->vs_tpg);
vs->vs_tpg = vs_tpg;

If a scsi request is then sent we do:

vhost_scsi_handle_vq -> vhost_scsi_get_req -> vhost_vq_get_backend

which sees the vs_tpg we just did a kfree on.

2. Tpg dir removal hang:

This patch fixes an issue where we cannot remove a LIO/target layer
tpg (and structs above it like the target) dir due to the refcount
dropping to -1.

The problem is that if vhost_scsi_set_endpoint detects a tpg is already
in the vs->vs_tpg array, or if the tpg has been removed so
target_depend_item fails, the undepend goto handler will do
target_undepend_item on all tpgs in the vs_tpg array, dropping their
refcount to 0. At this time vs_tpg contains both the tpgs we have added
in the current vhost_scsi_set_endpoint call as well as tpgs we added in
previous calls which are also in vs->vs_tpg.

Later, when vhost_scsi_clear_endpoint runs, it will do
target_undepend_item on all the tpgs in the vs->vs_tpg, which will drop
their refcount to -1. Userspace will then not be able to remove the tpg
and will hang when it tries to do rmdir on the tpg dir.

3. Tpg leak:

This fixes a bug where we can leak tpgs and cause them to be
un-removable because the target name is overwritten when
vhost_scsi_set_endpoint is called multiple times but with different
target names.

The bug occurs if a user has called VHOST_SCSI_SET_ENDPOINT and setup
a vhost-scsi device to target/tpg mapping, then calls
VHOST_SCSI_SET_ENDPOINT again with a new target name that has tpgs we
haven't seen before (target1 has tpg1 but target2 has tpg2). When this
happens we don't teardown the old target tpg mapping and just overwrite
the target name and the vs->vs_tpg array. Later when we do
vhost_scsi_clear_endpoint, we are passed in either target1 or target2's
name and we will only match that target's tpgs when we loop over the
vs->vs_tpg. We will then return from the function without doing
target_undepend_item on the tpgs.

Because of all these bugs, it looks like being able to call
vhost_scsi_set_endpoint multiple times was never supported. The major
user, QEMU, already has checks to prevent this use case. So to fix the
issues, this patch prevents vhost_scsi_set_endpoint from being called
if it has already successfully added tpgs. To add, remove or change the
tpg config or target name, you must do a vhost_scsi_clear_endpoint
first.

Fixes: 25b98b64e284 ("vhost scsi: alloc cmds per vq instead of session")
Fixes: 4f7f46d32c98 ("tcm_vhost: Use vq->private_data to indicate if the endpoint is setup")
Reported-by: Haoran Zhang
Closes: https://lore.kernel.org/virtualization/e418a5ee-45ca-4d18-9b5d-6f8b6b1add8e@oracle.com/T/#me6c0041ce376677419b9b2563494172a01487ecb
Signed-off-by: Mike Christie
Reviewed-by: Stefan Hajnoczi
Message-Id: <20250129210922.121533-1-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Stefano Garzarella
---
 drivers/vhost/scsi.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 718fa4e0b31e..7aeff435c1d8 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1699,14 +1699,19 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 		}
 	}
 
+	if (vs->vs_tpg) {
+		pr_err("vhost-scsi endpoint already set for %s.\n",
+		       vs->vs_vhost_wwpn);
+		ret = -EEXIST;
+		goto out;
+	}
+
 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
 	vs_tpg = kzalloc(len, GFP_KERNEL);
 	if (!vs_tpg) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	if (vs->vs_tpg)
-		memcpy(vs_tpg, vs->vs_tpg, len);
 
 	mutex_lock(&vhost_scsi_mutex);
 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
@@ -1722,12 +1727,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 
 		tv_tport = tpg->tport;
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
-			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
-				mutex_unlock(&tpg->tv_tpg_mutex);
-				mutex_unlock(&vhost_scsi_mutex);
-				ret = -EEXIST;
-				goto undepend;
-			}
 			/*
 			 * In order to ensure individual vhost-scsi configfs
 			 * groups cannot be removed while in use by vhost ioctl,
@@ -1774,15 +1773,15 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 		}
 		ret = 0;
 	} else {
-		ret = -EEXIST;
+		ret = -ENODEV;
+		goto free_tpg;
 	}
 
 	/*
-	 * Act as synchronize_rcu to make sure access to
-	 * old vs->vs_tpg is finished.
+	 * Act as synchronize_rcu to make sure requests after this point
+	 * see a fully setup device.
 	 */
 	vhost_scsi_flush(vs);
-	kfree(vs->vs_tpg);
 	vs->vs_tpg = vs_tpg;
 	goto out;
 
@@ -1802,6 +1801,7 @@ undepend:
 			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
 		}
 	}
+free_tpg:
 	kfree(vs_tpg);
 out:
 	mutex_unlock(&vs->dev.mutex);
@@ -1904,6 +1904,7 @@ free_vs_tpg:
 	vhost_scsi_flush(vs);
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = NULL;
+	memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn));
 	WARN_ON(vs->vs_events_nr);
 	mutex_unlock(&vs->dev.mutex);
 	return 0;
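For illustration (not part of the patch), the userspace sequence that now fails
fast. struct vhost_scsi_target and the ioctls are the real vhost-scsi UAPI; the
WWPN value is made up and error handling is omitted:

	struct vhost_scsi_target t = { .abi_version = VHOST_SCSI_ABI_VERSION };

	strncpy(t.vhost_wwpn, "naa.6001405bd4e8476d", sizeof(t.vhost_wwpn) - 1);
	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);	/* first call succeeds */
	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);	/* now fails with EEXIST */

	/* To reconfigure, the endpoint must be cleared first: */
	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &t);
	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);	/* succeeds again */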
From 439252e167ac45a5d46f573aac1da7d8f3e051ad Mon Sep 17 00:00:00 2001
From: Konstantin Shkolnyy
Date: Tue, 4 Feb 2025 11:31:27 -0600
Subject: [PATCH 06/17] vdpa/mlx5: Fix mlx5_vdpa_get_config() endianness on big-endian machines
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

mlx5_vdpa_dev_add() doesn't initialize mvdev->actual_features. It's
initialized later by mlx5_vdpa_set_driver_features(). However,
mlx5_vdpa_get_config() depends on the VIRTIO_F_VERSION_1 flag in
actual_features to return data with the correct endianness. When it's
called before mlx5_vdpa_set_driver_features(), the data is incorrectly
returned as big-endian on big-endian machines, while QEMU then
interprets it as little-endian.

The fix is to initialize the VIRTIO_F_VERSION_1 flag as early as
possible, especially considering that mlx5_vdpa_dev_add() insists on
this flag always being set anyway.

Signed-off-by: Konstantin Shkolnyy
Message-Id: <20250204173127.166673-1-kshk@linux.ibm.com>
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Dragos Tatulea
Acked-by: Jason Wang
---
 drivers/vdpa/mlx5/net/mlx5_vnet.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 36099047560d..cccc49a08a1a 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3884,6 +3884,9 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	ndev->mvdev.max_vqs = max_vqs;
 	mvdev = &ndev->mvdev;
 	mvdev->mdev = mdev;
+	/* cpu_to_mlx5vdpa16() below depends on this flag */
+	mvdev->actual_features =
+		(device_features & BIT_ULL(VIRTIO_F_VERSION_1));
 
 	ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
 	ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
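For context, a sketch of the helpers the comment refers to, following the usual
virtio byte-order pattern (shown for orientation; consult the mlx5 headers for
the exact definitions):

	static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
	{
		return virtio_legacy_is_little_endian() ||
		       (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
	}

	static inline __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
	{
		return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
	}

With actual_features still zeroed, a big-endian host takes the big-endian
branch and produces config fields that QEMU misreads as little-endian.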
From a6097e0a54a5c24f8d577ffecbc35289ae281c2e Mon Sep 17 00:00:00 2001
From: Si-Wei Liu
Date: Thu, 20 Feb 2025 21:37:33 +0200
Subject: [PATCH 07/17] vdpa/mlx5: Fix oversized null mkey longer than 32bit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

create_user_mr() has correct code to count the number of null keys
used to fill in a hole for the memory map. However, fill_indir() does
not do the same to cap each range at the 1GB limit. Fill in more null
keys for the gaps in between, so that null keys are correctly
populated.

Fixes: 94abbccdf291 ("vdpa/mlx5: Add shared memory registration code")
Cc: stable@vger.kernel.org
Reported-by: Cong Meng
Signed-off-by: Si-Wei Liu
Signed-off-by: Dragos Tatulea
Acked-by: Eugenio Pérez
Message-Id: <20250220193732.521462-2-dtatulea@nvidia.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Jason Wang
---
 drivers/vdpa/mlx5/core/mr.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 8455f08f5d40..61424342c096 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -190,9 +190,12 @@ again:
 			klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
 			preve = dmr->end;
 		} else {
+			u64 bcount = min_t(u64, dmr->start - preve, MAX_KLM_SIZE);
+
 			klm->key = cpu_to_be32(mvdev->res.null_mkey);
-			klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
-			preve = dmr->start;
+			klm->bcount = cpu_to_be32(klm_bcount(bcount));
+			preve += bcount;
+			goto again;
 		}
 	}
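To see the effect of the capped bcount, a worked example (numbers illustrative
only):

	/*
	 * Assume MAX_KLM_SIZE caps an entry at 1 GB and there is a 2.5 GB hole
	 * between preve and dmr->start. The fixed loop now emits:
	 *
	 *   pass 1: bcount = min(2.5 GB, 1 GB) = 1 GB   -> null mkey, preve += 1 GB
	 *   pass 2: bcount = min(1.5 GB, 1 GB) = 1 GB   -> null mkey, preve += 1 GB
	 *   pass 3: bcount = min(0.5 GB, 1 GB) = 0.5 GB -> null mkey, preve == dmr->start
	 *
	 * i.e. three correctly sized null-key KLM entries cover the hole where
	 * the old code emitted a single oversized one.
	 */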
From 3c7df2e27346eb40a0e86230db1ccab195c97cfe Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Thu, 16 Jan 2025 11:40:59 -0800
Subject: [PATCH 08/17] sound/virtio: Fix cancel_sync warnings on uninitialized work_structs

Betty reported hitting the following warning:

[    8.709131][  T221] WARNING: CPU: 2 PID: 221 at kernel/workqueue.c:4182
...
[    8.713282][  T221] Call trace:
[    8.713365][  T221]  __flush_work+0x8d0/0x914
[    8.713468][  T221]  __cancel_work_sync+0xac/0xfc
[    8.713570][  T221]  cancel_work_sync+0x24/0x34
[    8.713667][  T221]  virtsnd_remove+0xa8/0xf8 [virtio_snd ab15f34d0dd772f6d11327e08a81d46dc9c36276]
[    8.713868][  T221]  virtsnd_probe+0x48c/0x664 [virtio_snd ab15f34d0dd772f6d11327e08a81d46dc9c36276]
[    8.714035][  T221]  virtio_dev_probe+0x28c/0x390
[    8.714139][  T221]  really_probe+0x1bc/0x4c8
...

It seems we're hitting the error path in virtsnd_probe(), which
triggers a virtsnd_remove(), which iterates over the substreams
calling cancel_work_sync() on the elapsed_period work_struct.

Looking at the code, from earlier in:
virtsnd_probe() -> virtsnd_build_devs() -> virtsnd_pcm_parse_cfg()

We set snd->nsubstreams, allocate the snd->substreams, and if we then
hit an error on the info allocation or something in
virtsnd_ctl_query_info() fails, we will exit without having initialized
the elapsed_period work_struct.

When that error path unwinds, we then call virtsnd_remove(), which, as
long as the substreams array is allocated, will iterate through it
calling cancel_work_sync() on the uninitialized work structs, hitting
this warning.

Takashi Iwai suggested this fix, which initializes the substreams
structure right after allocation, so that if we hit the error paths
we avoid trying to clean up uninitialized data.

Note: I have not yet managed to reproduce the issue myself, so this
patch has had limited testing. Feedback or thoughts would be
appreciated!

Cc: Anton Yakovlev
Cc: "Michael S. Tsirkin"
Cc: Jaroslav Kysela
Cc: Takashi Iwai
Cc: virtualization@lists.linux.dev
Cc: linux-sound@vger.kernel.org
Cc: kernel-team@android.com
Reported-by: Betty Zhou
Suggested-by: Takashi Iwai
Signed-off-by: John Stultz
Message-Id: <20250116194114.3375616-1-jstultz@google.com>
Signed-off-by: Michael S. Tsirkin
---
 sound/virtio/virtio_pcm.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/sound/virtio/virtio_pcm.c b/sound/virtio/virtio_pcm.c
index 967e4c45be9b..2f7c5e709f07 100644
--- a/sound/virtio/virtio_pcm.c
+++ b/sound/virtio/virtio_pcm.c
@@ -339,6 +339,21 @@ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd)
 	if (!snd->substreams)
 		return -ENOMEM;
 
+	/*
+	 * Initialize critical substream fields early in case we hit an
+	 * error path and end up trying to clean up uninitialized structures
+	 * elsewhere.
+	 */
+	for (i = 0; i < snd->nsubstreams; ++i) {
+		struct virtio_pcm_substream *vss = &snd->substreams[i];
+
+		vss->snd = snd;
+		vss->sid = i;
+		INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed);
+		init_waitqueue_head(&vss->msg_empty);
+		spin_lock_init(&vss->lock);
+	}
+
 	info = kcalloc(snd->nsubstreams, sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
@@ -352,12 +367,6 @@ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd)
 		struct virtio_pcm_substream *vss = &snd->substreams[i];
 		struct virtio_pcm *vpcm;
 
-		vss->snd = snd;
-		vss->sid = i;
-		INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed);
-		init_waitqueue_head(&vss->msg_empty);
-		spin_lock_init(&vss->lock);
-
 		rc = virtsnd_pcm_build_hw(vss, &info[i]);
 		if (rc)
 			goto on_exit;
From fc80842a2799f83fd0fe73bafdaa8eaae5336ed0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?=
Date: Tue, 21 Jan 2025 11:33:46 +0100
Subject: [PATCH 09/17] vduse: add virtio_fs to allowed dev id
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

A VDUSE device that implements a virtiofs device works fine just by
adding the device id to the whitelist.

Signed-off-by: Eugenio Pérez
Message-Id: <20250121103346.1030165-1-eperezma@redhat.com>
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Stefan Hajnoczi
---
 drivers/vdpa/vdpa_user/vduse_dev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 7ae99691efdf..6a9a37351310 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -144,6 +144,7 @@ static struct workqueue_struct *vduse_irq_bound_wq;
 static u32 allowed_device_id[] = {
 	VIRTIO_ID_BLOCK,
 	VIRTIO_ID_NET,
+	VIRTIO_ID_FS,
 };
 
 static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa)
From 4c1f3a7d74277903b290dd989cbd2ea8627fc925 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Tue, 3 Dec 2024 13:15:08 -0600
Subject: [PATCH 10/17] vhost-scsi: Reduce mem use by moving upages to per queue

Each worker thread can process 1 command at a time, so there's no need
to allocate a upages array per cmd. This patch moves it to per queue.
Even for a small device with 128 cmds and 1 queue, this brings mem use
for the array down from:

2 MB = 8 bytes per page pointer * 2048 pointers * 128 cmds

to:

16K = 8 bytes per pointer * 2048 pointers * 1 queue

Signed-off-by: Mike Christie
Message-Id: <20241203191705.19431-2-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Stefan Hajnoczi
---
 drivers/vhost/scsi.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 7aeff435c1d8..c266171045c5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -83,7 +83,6 @@ struct vhost_scsi_cmd {
 	/* Pointer to the SGL formatted memory from virtio-scsi */
 	struct scatterlist *tvc_sgl;
 	struct scatterlist *tvc_prot_sgl;
-	struct page **tvc_upages;
 	/* Pointer to response header iovec */
 	struct iovec *tvc_resp_iov;
 	/* Pointer to vhost_scsi for our device */
@@ -187,6 +186,7 @@ struct vhost_scsi_virtqueue {
 	struct vhost_scsi_cmd *scsi_cmds;
 	struct sbitmap scsi_tags;
 	int max_cmds;
+	struct page **upages;
 
 	struct vhost_work completion_work;
 	struct llist_head completion_list;
@@ -619,7 +619,6 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 	struct vhost_scsi_nexus *tv_nexus;
 	struct scatterlist *sg, *prot_sg;
 	struct iovec *tvc_resp_iov;
-	struct page **pages;
 	int tag;
 
 	tv_nexus = tpg->tpg_nexus;
@@ -637,12 +636,10 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 	cmd = &svq->scsi_cmds[tag];
 	sg = cmd->tvc_sgl;
 	prot_sg = cmd->tvc_prot_sgl;
-	pages = cmd->tvc_upages;
 	tvc_resp_iov = cmd->tvc_resp_iov;
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->tvc_sgl = sg;
 	cmd->tvc_prot_sgl = prot_sg;
-	cmd->tvc_upages = pages;
 	cmd->tvc_se_cmd.map_tag = tag;
 	cmd->tvc_tag = scsi_tag;
 	cmd->tvc_lun = lun;
@@ -669,7 +666,9 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 		      struct scatterlist *sgl,
 		      bool is_prot)
 {
-	struct page **pages = cmd->tvc_upages;
+	struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
+				struct vhost_scsi_virtqueue, vq);
+	struct page **pages = svq->upages;
 	struct scatterlist *sg = sgl;
 	ssize_t bytes, mapped_bytes;
 	size_t offset, mapped_offset;
@@ -1598,11 +1597,11 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
 
 		kfree(tv_cmd->tvc_sgl);
 		kfree(tv_cmd->tvc_prot_sgl);
-		kfree(tv_cmd->tvc_upages);
 		kfree(tv_cmd->tvc_resp_iov);
 	}
 
 	sbitmap_free(&svq->scsi_tags);
+	kfree(svq->upages);
 	kfree(svq->scsi_cmds);
 	svq->scsi_cmds = NULL;
 }
@@ -1628,6 +1627,11 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
 		return -ENOMEM;
 	}
 
+	svq->upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES, sizeof(struct page *),
+			      GFP_KERNEL);
+	if (!svq->upages)
+		goto out;
+
 	for (i = 0; i < max_cmds; i++) {
 		tv_cmd = &svq->scsi_cmds[i];
 
@@ -1639,14 +1643,6 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
 			goto out;
 		}
 
-		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
-					     sizeof(struct page *),
-					     GFP_KERNEL);
-		if (!tv_cmd->tvc_upages) {
-			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
-			goto out;
-		}
-
 		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
 					       sizeof(struct iovec),
 					       GFP_KERNEL);
From bf2d650391be508dd8b5e188b65ed32300cf3489 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Tue, 3 Dec 2024 13:15:09 -0600
Subject: [PATCH 11/17] vhost-scsi: Allocate T10 PI structs only when enabled

T10 PI is not a widely used feature. This has us only allocate the
structs for it if the feature has been enabled. For a common small
setup where you have 1 virtqueue and 128 commands per queue, this
saves:

8 MB = 32 bytes per sg * 2048 entries * 128 commands

per vhost-scsi device.

Signed-off-by: Mike Christie
Message-Id: <20241203191705.19431-3-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Stefan Hajnoczi
---
 drivers/vhost/scsi.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c266171045c5..e361c43d0730 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1651,12 +1651,14 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
 			goto out;
 		}
 
-		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
-					       sizeof(struct scatterlist),
-					       GFP_KERNEL);
-		if (!tv_cmd->tvc_prot_sgl) {
-			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
-			goto out;
+		if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
+			tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
+						       sizeof(struct scatterlist),
+						       GFP_KERNEL);
+			if (!tv_cmd->tvc_prot_sgl) {
+				pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+				goto out;
+			}
 		}
 	}
 	return 0;
From 3ca51662f8186b569b8fb282242c20ccbb3993c2 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Tue, 3 Dec 2024 13:15:10 -0600
Subject: [PATCH 12/17] vhost-scsi: Add better resource allocation failure handling

If we can't allocate mem to map in data for a request or can't find
a tag for a command, we currently drop the command. This leads to the
error handler running to clean it up. Instead of dropping the command,
this has us return an error telling the initiator that it queued more
commands than we can handle. The initiator will then reduce how many
commands it will send us and retry later.

Signed-off-by: Mike Christie
Message-Id: <20241203191705.19431-4-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Stefan Hajnoczi
---
 drivers/vhost/scsi.c | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e361c43d0730..ed4fd45da287 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -629,7 +629,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 
 	tag = sbitmap_get(&svq->scsi_tags);
 	if (tag < 0) {
-		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
+		pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -928,6 +928,24 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
 	target_submit(se_cmd);
 }
 
+static void
+vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+		       int head, unsigned int out, u8 status)
+{
+	struct virtio_scsi_cmd_resp __user *resp;
+	struct virtio_scsi_cmd_resp rsp;
+	int ret;
+
+	memset(&rsp, 0, sizeof(rsp));
+	rsp.status = status;
+	resp = vq->iov[out].iov_base;
+	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+	else
+		pr_err("Faulted on virtio_scsi_cmd_resp\n");
+}
+
 static void
 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 			   struct vhost_virtqueue *vq,
@@ -1215,8 +1233,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 					 exp_data_len + prot_bytes,
 					 data_direction);
 		if (IS_ERR(cmd)) {
-			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
-			       PTR_ERR(cmd));
+			ret = PTR_ERR(cmd);
+			vq_err(vq, "vhost_scsi_get_tag failed %d\n", ret);
 			goto err;
 		}
 		cmd->tvc_vhost = vs;
@@ -1253,11 +1271,15 @@ err:
 		 * EINVAL: Invalid response buffer, drop the request
 		 * EIO:    Respond with bad target
 		 * EAGAIN: Pending request
+		 * ENOMEM: Could not allocate resources for request
 		 */
 		if (ret == -ENXIO)
 			break;
 		else if (ret == -EIO)
 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+		else if (ret == -ENOMEM)
+			vhost_scsi_send_status(vs, vq, vc.head, vc.out,
+					       SAM_STAT_TASK_SET_FULL);
 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
 	mutex_unlock(&vq->mutex);
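For orientation (sketch only; field names from the virtio-scsi spec): the
response vhost_scsi_send_status() builds for this case is equivalent to

	struct virtio_scsi_cmd_resp rsp = {
		.response = VIRTIO_SCSI_S_OK,		/* transport level: delivered fine */
		.status   = SAM_STAT_TASK_SET_FULL,	/* SCSI level: queue full, retry */
	};

so the initiator treats it as an ordinary SCSI queue-full condition and backs
off, instead of timing the command out and escalating to its error handler.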
From 891b99eab0f89dbe08d216f4ab71acbeaf7a3102 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Tue, 3 Dec 2024 13:15:11 -0600
Subject: [PATCH 13/17] vhost-scsi: Return queue full for page alloc failures during copy

This has us return queue full if we can't allocate a page during the
copy operation, so the initiator can retry.

Signed-off-by: Mike Christie
Message-Id: <20241203191705.19431-5-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Stefan Hajnoczi
---
 drivers/vhost/scsi.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ed4fd45da287..c087fbc6f8d3 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -756,7 +756,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 	size_t len = iov_iter_count(iter);
 	unsigned int nbytes = 0;
 	struct page *page;
-	int i;
+	int i, ret;
 
 	if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
 		cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
@@ -769,6 +769,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 		page = alloc_page(GFP_KERNEL);
 		if (!page) {
 			i--;
+			ret = -ENOMEM;
 			goto err;
 		}
 
@@ -776,8 +777,10 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 		sg_set_page(&sg[i], page, nbytes, 0);
 
 		if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
-		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
+		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
+			ret = -EFAULT;
 			goto err;
+		}
 
 		len -= nbytes;
 	}
@@ -792,7 +795,7 @@ err:
 	for (; i >= 0; i--)
 		__free_page(sg_page(&sg[i]));
 	kfree(cmd->saved_iter_addr);
-	return -ENOMEM;
+	return ret;
 }
 
 static int
@@ -1249,9 +1252,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
 		if (data_direction != DMA_NONE) {
-			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
-						      &prot_iter, exp_data_len,
-						      &data_iter))) {
+			ret = vhost_scsi_mapal(cmd, prot_bytes, &prot_iter,
+					       exp_data_len, &data_iter);
+			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
 				goto err;
From bca939d5bcd00d6faea99c47eafd60bed573ef03 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Tue, 3 Dec 2024 13:15:12 -0600
Subject: [PATCH 14/17] vhost-scsi: Dynamically allocate scatterlists

We currently preallocate scatterlists which have 2048 entries for each
command. For a small device with just 1 queue this results in:

8 MB = 32 bytes per sg * 2048 entries * 128 cmds

When mq is turned on and we increase the virtqueue_size so we can
handle commands from multiple queues in parallel, then this can
skyrocket.

This patch allows us to dynamically allocate the scatterlist like is
done with drivers like NVMe and SCSI. For small IO (4-16K) IOPs
testing, we didn't see any regressions, but for throughput testing we
sometimes saw a 2-5% regression when the backend device was very fast
(8 NVMe drives in a MD RAID0 config or a memory backed device). As a
result this patch makes the dynamic allocation feature a modparam, so
userspace can decide how it wants to balance mem use and perf.

Signed-off-by: Mike Christie
Message-Id: <20241203191705.19431-6-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Stefan Hajnoczi
---
 drivers/vhost/Kconfig |   1 +
 drivers/vhost/scsi.c  | 260 ++++++++++++++++++++++++++++--------------
 2 files changed, 173 insertions(+), 88 deletions(-)

diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index b455d9ab6f3d..020d4fbb947c 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -47,6 +47,7 @@ config VHOST_SCSI
 	tristate "VHOST_SCSI TCM fabric driver"
 	depends on TARGET_CORE && EVENTFD
 	select VHOST
+	select SG_POOL
 	default n
 	help
 	Say M here to enable the vhost_scsi TCM fabric module

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c087fbc6f8d3..c7bb8485e447 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -46,6 +46,50 @@
 #define VHOST_SCSI_PREALLOC_UPAGES 2048
 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
 
+static unsigned int vhost_scsi_inline_sg_cnt = VHOST_SCSI_PREALLOC_SGLS;
+
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+static int vhost_scsi_set_inline_sg_cnt(const char *buf,
+					const struct kernel_param *kp)
+{
+	pr_err("Setting inline_sg_cnt is not supported.\n");
+	return -EOPNOTSUPP;
+}
+#else
+static int vhost_scsi_set_inline_sg_cnt(const char *buf,
+					const struct kernel_param *kp)
+{
+	unsigned int cnt;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &cnt);
+	if (ret)
+		return ret;
+
+	if (cnt > VHOST_SCSI_PREALLOC_SGLS) {
+		pr_err("Max inline_sg_cnt is %u\n", VHOST_SCSI_PREALLOC_SGLS);
+		return -EINVAL;
+	}
+
+	vhost_scsi_inline_sg_cnt = cnt;
+	return 0;
+}
+#endif
+
+static int vhost_scsi_get_inline_sg_cnt(char *buf,
+					const struct kernel_param *kp)
+{
+	return sprintf(buf, "%u\n", vhost_scsi_inline_sg_cnt);
+}
+
+static const struct kernel_param_ops vhost_scsi_inline_sg_cnt_op = {
+	.get = vhost_scsi_get_inline_sg_cnt,
+	.set = vhost_scsi_set_inline_sg_cnt,
+};
+
+module_param_cb(inline_sg_cnt, &vhost_scsi_inline_sg_cnt_op, NULL, 0644);
+MODULE_PARM_DESC(inline_sg_cnt, "Set the number of scatterlist entries to pre-allocate. The default is 2048.");
+
 /* Max number of requests before requeueing the job.
  * Using this limit prevents one virtqueue from starving others with
  * request.
@@ -80,9 +124,10 @@ struct vhost_scsi_cmd {
 	u32 copied_iov:1;
 	const void *saved_iter_addr;
 	struct iov_iter saved_iter;
-	/* Pointer to the SGL formatted memory from virtio-scsi */
-	struct scatterlist *tvc_sgl;
-	struct scatterlist *tvc_prot_sgl;
+	struct scatterlist *sgl;
+	struct sg_table table;
+	struct scatterlist *prot_sgl;
+	struct sg_table prot_table;
 	/* Pointer to response header iovec */
 	struct iovec *tvc_resp_iov;
 	/* Pointer to vhost_scsi for our device */
@@ -206,6 +251,8 @@ struct vhost_scsi {
 	bool vs_events_missed; /* any missed events, protected by vq->mutex */
 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
+
+	unsigned int inline_sg_cnt;
 };
 
 struct vhost_scsi_tmf {
@@ -328,23 +375,35 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 {
 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
 				struct vhost_scsi_cmd, tvc_se_cmd);
+	struct vhost_scsi *vs = tv_cmd->tvc_vhost;
 	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
 				struct vhost_scsi_virtqueue, vq);
 	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
+	struct scatterlist *sg;
+	struct page *page;
 	int i;
 
 	if (tv_cmd->tvc_sgl_count) {
-		for (i = 0; i < tv_cmd->tvc_sgl_count; i++) {
+		for_each_sgtable_sg(&tv_cmd->table, sg, i) {
+			page = sg_page(sg);
+			if (!page)
+				continue;
+
 			if (tv_cmd->copied_iov)
-				__free_page(sg_page(&tv_cmd->tvc_sgl[i]));
+				__free_page(page);
 			else
-				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+				put_page(page);
 		}
 		kfree(tv_cmd->saved_iter_addr);
+		sg_free_table_chained(&tv_cmd->table, vs->inline_sg_cnt);
 	}
 	if (tv_cmd->tvc_prot_sgl_count) {
-		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
-			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
+		for_each_sgtable_sg(&tv_cmd->prot_table, sg, i) {
+			page = sg_page(sg);
+			if (page)
+				put_page(page);
+		}
+		sg_free_table_chained(&tv_cmd->prot_table, vs->inline_sg_cnt);
 	}
 
 	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
@@ -534,14 +593,17 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
 static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
 {
 	struct iov_iter *iter = &cmd->saved_iter;
-	struct scatterlist *sg = cmd->tvc_sgl;
+	struct scatterlist *sg;
 	struct page *page;
 	size_t len;
 	int i;
 
-	for (i = 0; i < cmd->tvc_sgl_count; i++) {
-		page = sg_page(&sg[i]);
-		len = sg[i].length;
+	for_each_sgtable_sg(&cmd->table, sg, i) {
+		page = sg_page(sg);
+		if (!page)
+			continue;
+
+		len = sg->length;
 
 		if (copy_page_to_iter(page, 0, len, iter) != len) {
 			pr_err("Could not copy data while handling misaligned cmd. Error %zu\n",
@@ -617,7 +679,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 				struct vhost_scsi_virtqueue, vq);
 	struct vhost_scsi_cmd *cmd;
 	struct vhost_scsi_nexus *tv_nexus;
-	struct scatterlist *sg, *prot_sg;
+	struct scatterlist *sgl, *prot_sgl;
 	struct iovec *tvc_resp_iov;
 	int tag;
 
@@ -634,12 +696,12 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 	}
 
 	cmd = &svq->scsi_cmds[tag];
-	sg = cmd->tvc_sgl;
-	prot_sg = cmd->tvc_prot_sgl;
+	sgl = cmd->sgl;
+	prot_sgl = cmd->prot_sgl;
 	tvc_resp_iov = cmd->tvc_resp_iov;
 	memset(cmd, 0, sizeof(*cmd));
-	cmd->tvc_sgl = sg;
-	cmd->tvc_prot_sgl = prot_sg;
+	cmd->sgl = sgl;
+	cmd->prot_sgl = prot_sgl;
 	cmd->tvc_se_cmd.map_tag = tag;
 	cmd->tvc_tag = scsi_tag;
 	cmd->tvc_lun = lun;
@@ -655,6 +717,27 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 	return cmd;
 }
 
+static void vhost_scsi_revert_map_iov_to_sgl(struct iov_iter *iter,
+					     struct scatterlist *curr,
+					     struct scatterlist *end)
+{
+	size_t revert_bytes = 0;
+	struct page *page;
+
+	while (curr != end) {
+		page = sg_page(curr);
+
+		if (page) {
+			put_page(page);
+			revert_bytes += curr->length;
+		}
+		/* Clear so we can re-use it for the copy path */
+		sg_set_page(curr, NULL, 0, 0);
+		curr = sg_next(curr);
+	}
+	iov_iter_revert(iter, revert_bytes);
+}
+
 /*
  * Map a user memory range into a scatterlist
  *
@@ -663,16 +746,17 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 static int
 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 		      struct iov_iter *iter,
-		      struct scatterlist *sgl,
+		      struct sg_table *sg_table,
+		      struct scatterlist **sgl,
 		      bool is_prot)
 {
 	struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
 				struct vhost_scsi_virtqueue, vq);
 	struct page **pages = svq->upages;
-	struct scatterlist *sg = sgl;
-	ssize_t bytes, mapped_bytes;
-	size_t offset, mapped_offset;
-	unsigned int npages = 0;
+	struct scatterlist *sg = *sgl;
+	ssize_t bytes;
+	size_t offset;
+	unsigned int n, npages = 0;
 
 	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
 				VHOST_SCSI_PREALLOC_UPAGES, &offset);
@@ -680,11 +764,8 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 	if (bytes <= 0)
 		return bytes < 0 ? bytes : -EFAULT;
 
-	mapped_bytes = bytes;
-	mapped_offset = offset;
-
 	while (bytes) {
-		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
+		n = min_t(unsigned int, PAGE_SIZE - offset, bytes);
 		/*
 		 * The block layer requires bios/requests to be a multiple of
 		 * 512 bytes, but Windows can send us vecs that are misaligned.
@@ -705,25 +786,24 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
 			goto revert_iter_get_pages;
 		}
 
-		sg_set_page(sg++, pages[npages++], n, offset);
+		sg_set_page(sg, pages[npages++], n, offset);
+		sg = sg_next(sg);
 		bytes -= n;
 		offset = 0;
 	}
 
+	*sgl = sg;
 	return npages;
 
 revert_iter_get_pages:
-	iov_iter_revert(iter, mapped_bytes);
+	vhost_scsi_revert_map_iov_to_sgl(iter, *sgl, sg);
 
-	npages = 0;
-	while (mapped_bytes) {
-		unsigned int n = min_t(unsigned int, PAGE_SIZE - mapped_offset,
-				       mapped_bytes);
+	iov_iter_revert(iter, bytes);
+	while (bytes) {
+		n = min_t(unsigned int, PAGE_SIZE, bytes);
 
 		put_page(pages[npages++]);
-
-		mapped_bytes -= n;
-		mapped_offset = 0;
+		bytes -= n;
 	}
 
 	return -EINVAL;
@@ -751,10 +831,11 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 
 static int
 vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
-			   struct scatterlist *sg, int sg_count)
+			   struct sg_table *sg_table, int sg_count)
 {
 	size_t len = iov_iter_count(iter);
 	unsigned int nbytes = 0;
+	struct scatterlist *sg;
 	struct page *page;
 	int i, ret;
 
@@ -765,16 +846,15 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 			return -ENOMEM;
 	}
 
-	for (i = 0; i < sg_count; i++) {
+	for_each_sgtable_sg(sg_table, sg, i) {
 		page = alloc_page(GFP_KERNEL);
 		if (!page) {
-			i--;
 			ret = -ENOMEM;
 			goto err;
 		}
 
 		nbytes = min_t(unsigned int, PAGE_SIZE, len);
-		sg_set_page(&sg[i], page, nbytes, 0);
+		sg_set_page(sg, page, nbytes, 0);
 
 		if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
 		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
@@ -792,39 +872,29 @@ err:
 	pr_err("Could not read %u bytes while handling misaligned cmd\n",
 	       nbytes);
 
-	for (; i >= 0; i--)
-		__free_page(sg_page(&sg[i]));
+	for_each_sgtable_sg(sg_table, sg, i) {
+		page = sg_page(sg);
+		if (page)
+			__free_page(page);
+	}
 	kfree(cmd->saved_iter_addr);
 	return ret;
 }
 
 static int
 vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
-			  struct scatterlist *sg, int sg_count, bool is_prot)
+			  struct sg_table *sg_table, int sg_count, bool is_prot)
 {
-	struct scatterlist *p = sg;
-	size_t revert_bytes;
+	struct scatterlist *sg = sg_table->sgl;
 	int ret;
 
 	while (iov_iter_count(iter)) {
-		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, is_prot);
+		ret = vhost_scsi_map_to_sgl(cmd, iter, sg_table, &sg, is_prot);
 		if (ret < 0) {
-			revert_bytes = 0;
-
-			while (p < sg) {
-				struct page *page = sg_page(p);
-
-				if (page) {
-					put_page(page);
-					revert_bytes += p->length;
-				}
-				p++;
-			}
-
-			iov_iter_revert(iter, revert_bytes);
+			vhost_scsi_revert_map_iov_to_sgl(iter, sg_table->sgl,
+							 sg);
 			return ret;
 		}
-		sg += ret;
 	}
 
 	return 0;
@@ -835,23 +905,29 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
 		 size_t prot_bytes, struct iov_iter *prot_iter,
 		 size_t data_bytes, struct iov_iter *data_iter)
 {
+	struct vhost_scsi *vs = cmd->tvc_vhost;
 	int sgl_count, ret;
 
 	if (prot_bytes) {
 		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
 						 VHOST_SCSI_PREALLOC_PROT_SGLS);
-		if (sgl_count < 0)
-			return sgl_count;
+		cmd->prot_table.sgl = cmd->prot_sgl;
+		ret = sg_alloc_table_chained(&cmd->prot_table, sgl_count,
+					     cmd->prot_table.sgl,
+					     vs->inline_sg_cnt);
+		if (ret)
+			return ret;
 
-		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
 		cmd->tvc_prot_sgl_count = sgl_count;
 		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
-			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
+			 cmd->prot_table.sgl, cmd->tvc_prot_sgl_count);
 
 		ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
-						cmd->tvc_prot_sgl,
+						&cmd->prot_table,
 						cmd->tvc_prot_sgl_count, true);
 		if (ret < 0) {
+			sg_free_table_chained(&cmd->prot_table,
+					      vs->inline_sg_cnt);
 			cmd->tvc_prot_sgl_count = 0;
 			return ret;
 		}
@@ -861,20 +937,23 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
 	if (sgl_count < 0)
 		return sgl_count;
 
-	sg_init_table(cmd->tvc_sgl, sgl_count);
+	cmd->table.sgl = cmd->sgl;
+	ret = sg_alloc_table_chained(&cmd->table, sgl_count, cmd->table.sgl,
+				     vs->inline_sg_cnt);
+	if (ret)
+		return ret;
+
 	cmd->tvc_sgl_count = sgl_count;
 	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
-		 cmd->tvc_sgl, cmd->tvc_sgl_count);
+		 cmd->table.sgl, cmd->tvc_sgl_count);
 
-	ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
+	ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, &cmd->table,
 					cmd->tvc_sgl_count, false);
-	if (ret == -EINVAL) {
-		sg_init_table(cmd->tvc_sgl, cmd->tvc_sgl_count);
-		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
+	if (ret == -EINVAL)
+		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, &cmd->table,
 						 cmd->tvc_sgl_count);
-	}
-
 	if (ret < 0) {
+		sg_free_table_chained(&cmd->table, vs->inline_sg_cnt);
 		cmd->tvc_sgl_count = 0;
 		return ret;
 	}
@@ -906,10 +985,10 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
 
 	/* FIXME: BIDI operation */
 	if (cmd->tvc_sgl_count) {
-		sg_ptr = cmd->tvc_sgl;
+		sg_ptr = cmd->table.sgl;
 
 		if (cmd->tvc_prot_sgl_count)
-			sg_prot_ptr = cmd->tvc_prot_sgl;
+			sg_prot_ptr = cmd->prot_table.sgl;
 		else
 			se_cmd->prot_pto = true;
 	} else {
@@ -1620,8 +1699,8 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
 	for (i = 0; i < svq->max_cmds; i++) {
 		tv_cmd = &svq->scsi_cmds[i];
 
-		kfree(tv_cmd->tvc_sgl);
-		kfree(tv_cmd->tvc_prot_sgl);
+		kfree(tv_cmd->sgl);
+		kfree(tv_cmd->prot_sgl);
 		kfree(tv_cmd->tvc_resp_iov);
 	}
 
@@ -1635,6 +1714,7 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
 {
 	struct vhost_scsi_virtqueue *svq = container_of(vq,
 				struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi *vs = svq->vs;
 	struct vhost_scsi_cmd *tv_cmd;
 	unsigned int i;
 
@@ -1660,12 +1740,14 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
 	for (i = 0; i < max_cmds; i++) {
 		tv_cmd = &svq->scsi_cmds[i];
 
-		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
-					  sizeof(struct scatterlist),
-					  GFP_KERNEL);
-		if (!tv_cmd->tvc_sgl) {
-			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
-			goto out;
+		if (vs->inline_sg_cnt) {
+			tv_cmd->sgl = kcalloc(vs->inline_sg_cnt,
+					      sizeof(struct scatterlist),
+					      GFP_KERNEL);
+			if (!tv_cmd->sgl) {
+				pr_err("Unable to allocate tv_cmd->sgl\n");
+				goto out;
+			}
 		}
 
 		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
@@ -1676,12 +1758,13 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
 			goto out;
 		}
 
-		if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
-			tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
-						       sizeof(struct scatterlist),
-						       GFP_KERNEL);
-			if (!tv_cmd->tvc_prot_sgl) {
-				pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+		if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) &&
+		    vs->inline_sg_cnt) {
+			tv_cmd->prot_sgl = kcalloc(vs->inline_sg_cnt,
+						   sizeof(struct scatterlist),
+						   GFP_KERNEL);
+			if (!tv_cmd->prot_sgl) {
+				pr_err("Unable to allocate tv_cmd->prot_sgl\n");
 				goto out;
 			}
 		}
@@ -1972,6 +2055,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
 	if (!vs)
 		goto err_vs;
+	vs->inline_sg_cnt = vhost_scsi_inline_sg_cnt;
 
 	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
 		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
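For orientation (generic lib/scatterlist usage sketch, not vhost-specific):
the chained-table pattern above lets a caller keep a small inline array and
chain extra entries on only when a command actually needs them:

	struct sg_table table;
	struct scatterlist inline_sgl[8];	/* e.g. inline_sg_cnt == 8 */

	table.sgl = inline_sgl;
	if (sg_alloc_table_chained(&table, nents, inline_sgl, 8))
		return -ENOMEM;			/* chains extra pages only if nents > 8 */

	/* ... fill entries with sg_set_page(), walk with for_each_sgtable_sg() ... */

	sg_free_table_chained(&table, 8);	/* frees only the chained portion */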
From ddc5b5f68ec553a037a822f81150acbbdefa3ad1 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Tue, 3 Dec 2024 13:15:13 -0600
Subject: [PATCH 15/17] vhost-scsi: Stop duplicating se_cmd fields

When setting up the command we will initially set values like lun and
data direction on the vhost scsi command. We then pass them to LIO,
which stores them again on the LIO se_cmd. The se_cmd is actually
stored in the vhost scsi command, so we are storing these values twice
on the same struct.

So this patch stops duplicating the storing of SCSI values like lun,
data dir, data len, cdb, etc. on the vhost scsi command and just
passes them to LIO, which will store them on the se_cmd.

Signed-off-by: Mike Christie
Message-Id: <20241203191705.19431-7-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Stefan Hajnoczi
---
 drivers/vhost/scsi.c | 95 +++++++++++++++++---------------------------
 1 file changed, 36 insertions(+), 59 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c7bb8485e447..fbcdc9866e80 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -106,21 +106,11 @@ struct vhost_scsi_inflight {
 struct vhost_scsi_cmd {
 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
 	int tvc_vq_desc;
-	/* virtio-scsi initiator task attribute */
-	int tvc_task_attr;
 	/* virtio-scsi response incoming iovecs */
 	int tvc_in_iovs;
-	/* virtio-scsi initiator data direction */
-	enum dma_data_direction tvc_data_direction;
-	/* Expected data transfer length from virtio-scsi header */
-	u32 tvc_exp_data_len;
-	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
-	u64 tvc_tag;
 	/* The number of scatterlists associated with this cmd */
 	u32 tvc_sgl_count;
 	u32 tvc_prot_sgl_count;
-	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
-	u32 tvc_lun;
 	u32 copied_iov:1;
 	const void *saved_iter_addr;
 	struct iov_iter saved_iter;
@@ -130,16 +120,10 @@ struct vhost_scsi_cmd {
 	struct sg_table prot_table;
 	/* Pointer to response header iovec */
 	struct iovec *tvc_resp_iov;
-	/* Pointer to vhost_scsi for our device */
-	struct vhost_scsi *tvc_vhost;
 	/* Pointer to vhost_virtqueue for the cmd */
 	struct vhost_virtqueue *tvc_vq;
-	/* Pointer to vhost nexus memory */
-	struct vhost_scsi_nexus *tvc_nexus;
 	/* The TCM I/O descriptor that is accessed via container_of() */
 	struct se_cmd tvc_se_cmd;
-	/* Copy of the incoming SCSI command descriptor block (CDB) */
-	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 	/* Completed commands list, serviced from vhost worker thread */
@@ -375,9 +359,9 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 {
 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
 				struct vhost_scsi_cmd, tvc_se_cmd);
-	struct vhost_scsi *vs = tv_cmd->tvc_vhost;
 	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
 				struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi *vs = svq->vs;
 	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
 	struct scatterlist *sg;
 	struct page *page;
@@ -671,24 +655,15 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 }
 
 static struct vhost_scsi_cmd *
-vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
-		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
-		   u32 exp_data_len, int data_direction)
+vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
 {
 	struct vhost_scsi_virtqueue *svq = container_of(vq,
 				struct vhost_scsi_virtqueue, vq);
 	struct vhost_scsi_cmd *cmd;
-	struct vhost_scsi_nexus *tv_nexus;
 	struct scatterlist *sgl, *prot_sgl;
 	struct iovec *tvc_resp_iov;
 	int tag;
 
-	tv_nexus = tpg->tpg_nexus;
-	if (!tv_nexus) {
-		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
-		return ERR_PTR(-EIO);
-	}
-
 	tag = sbitmap_get(&svq->scsi_tags);
 	if (tag < 0) {
 		pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
@@ -703,17 +678,9 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
 	cmd->sgl = sgl;
 	cmd->prot_sgl = prot_sgl;
 	cmd->tvc_se_cmd.map_tag = tag;
-	cmd->tvc_tag = scsi_tag;
-	cmd->tvc_lun = lun;
-	cmd->tvc_task_attr = task_attr;
-	cmd->tvc_exp_data_len = exp_data_len;
-	cmd->tvc_data_direction = data_direction;
-	cmd->tvc_nexus = tv_nexus;
 	cmd->inflight = vhost_scsi_get_inflight(vq);
 	cmd->tvc_resp_iov = tvc_resp_iov;
 
-	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
-
 	return cmd;
 }
 
@@ -831,7 +798,8 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 
 static int
 vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
-			   struct sg_table *sg_table, int sg_count)
+			   struct sg_table *sg_table, int sg_count,
+			   int data_dir)
 {
 	size_t len = iov_iter_count(iter);
 	unsigned int nbytes = 0;
@@ -839,7 +807,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 	struct page *page;
 	int i, ret;
 
-	if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
+	if (data_dir == DMA_FROM_DEVICE) {
 		cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
 						GFP_KERNEL);
 		if (!cmd->saved_iter_addr)
@@ -856,7 +824,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 		nbytes = min_t(unsigned int, PAGE_SIZE, len);
 		sg_set_page(sg, page, nbytes, 0);
 
-		if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
+		if (data_dir == DMA_TO_DEVICE &&
 		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
 			ret = -EFAULT;
 			goto err;
@@ -901,11 +869,10 @@ vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 }
 
 static int
-vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
+vhost_scsi_mapal(struct vhost_scsi *vs, struct vhost_scsi_cmd *cmd,
 		 size_t prot_bytes, struct iov_iter *prot_iter,
-		 size_t data_bytes, struct iov_iter *data_iter)
+		 size_t data_bytes, struct iov_iter *data_iter, int data_dir)
 {
-	struct vhost_scsi *vs = cmd->tvc_vhost;
 	int sgl_count, ret;
 
 	if (prot_bytes) {
@@ -951,7 +918,7 @@ vhost_scsi_mapal(struct vhost_scsi *vs, struct vhost_scsi_cmd *cmd,
 					cmd->tvc_sgl_count, false);
 	if (ret == -EINVAL)
 		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, &cmd->table,
-						 cmd->tvc_sgl_count);
+						 cmd->tvc_sgl_count, data_dir);
 	if (ret < 0) {
 		sg_free_table_chained(&cmd->table, vs->inline_sg_cnt);
 		cmd->tvc_sgl_count = 0;
@@ -977,10 +944,13 @@ static int vhost_scsi_to_tcm_attr(int attr)
 	return TCM_SIMPLE_TAG;
 }
 
-static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
+static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus,
+					struct vhost_scsi_cmd *cmd,
+					unsigned char *cdb, u16 lun,
+					int task_attr, int data_dir,
+					u32 exp_data_len)
 {
 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
-	struct vhost_scsi_nexus *tv_nexus;
 	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
 
 	/* FIXME: BIDI operation */
@@ -994,15 +964,13 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
 	} else {
 		sg_ptr = NULL;
 	}
-	tv_nexus = cmd->tvc_nexus;
 
 	se_cmd->tag = 0;
-	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
-			cmd->tvc_lun, cmd->tvc_exp_data_len,
-			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
-			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
+	target_init_cmd(se_cmd, nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
+			lun, exp_data_len, vhost_scsi_to_tcm_attr(task_attr),
+			data_dir, TARGET_SCF_ACK_KREF);
 
-	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
+	if (target_submit_prep(se_cmd, cdb, sg_ptr,
 			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
 			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
 		return;
@@ -1159,6 +1127,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	struct vhost_scsi_tpg **vs_tpg, *tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct virtio_scsi_cmd_req_pi v_req_pi;
+	struct vhost_scsi_nexus *nexus;
 	struct vhost_scsi_ctx vc;
 	struct vhost_scsi_cmd *cmd;
 	struct iov_iter in_iter, prot_iter, data_iter;
@@ -1168,7 +1137,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	u16 lun;
 	u8 task_attr;
 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
-	void *cdb;
+	u8 *cdb;
 
 	mutex_lock(&vq->mutex);
 	/*
@@ -1311,28 +1280,34 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 				 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
 			goto err;
 		}
-		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
-					 exp_data_len + prot_bytes,
-					 data_direction);
+
+		nexus = tpg->tpg_nexus;
+		if (!nexus) {
+			vq_err(vq, "Unable to locate active struct vhost_scsi_nexus\n");
+			ret = -EIO;
+			goto err;
+		}
+
+		cmd = vhost_scsi_get_cmd(vq, tag);
 		if (IS_ERR(cmd)) {
 			ret = PTR_ERR(cmd);
			vq_err(vq, "vhost_scsi_get_tag failed %d\n", ret);
 			goto err;
 		}
-		cmd->tvc_vhost = vs;
 		cmd->tvc_vq = vq;
 		for (i = 0; i < vc.in ; i++)
 			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
 		cmd->tvc_in_iovs = vc.in;
 
 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
-			 cmd->tvc_cdb[0], cmd->tvc_lun);
+			 cdb[0], lun);
 		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
 		if (data_direction != DMA_NONE) {
-			ret = vhost_scsi_mapal(cmd, prot_bytes, &prot_iter,
-					       exp_data_len, &data_iter);
+			ret = vhost_scsi_mapal(vs, cmd, prot_bytes, &prot_iter,
+					       exp_data_len, &data_iter,
+					       data_direction);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
 				goto err;
@@ -1345,7 +1320,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
 		 */
 		cmd->tvc_vq_desc = vc.head;
-		vhost_scsi_target_queue_cmd(cmd);
+		vhost_scsi_target_queue_cmd(nexus, cmd, cdb, lun, task_attr,
+					    data_direction,
+					    exp_data_len + prot_bytes);
 		ret = 0;
 err:
 		/*
Signed-off-by: Mike Christie
Message-Id: <20241203191705.19431-8-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Stefan Hajnoczi
---
 drivers/vhost/scsi.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index fbcdc9866e80..82389e2ac0c9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -112,8 +112,8 @@ struct vhost_scsi_cmd {
 	u32 tvc_sgl_count;
 	u32 tvc_prot_sgl_count;
 	u32 copied_iov:1;
-	const void *saved_iter_addr;
-	struct iov_iter saved_iter;
+	const void *read_iov;
+	struct iov_iter *read_iter;
 	struct scatterlist *sgl;
 	struct sg_table table;
 	struct scatterlist *prot_sgl;
@@ -378,7 +378,8 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 			else
 				put_page(page);
 		}
-		kfree(tv_cmd->saved_iter_addr);
+		kfree(tv_cmd->read_iter);
+		kfree(tv_cmd->read_iov);
 		sg_free_table_chained(&tv_cmd->table, vs->inline_sg_cnt);
 	}
 	if (tv_cmd->tvc_prot_sgl_count) {
@@ -576,7 +577,7 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
 
 static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
 {
-	struct iov_iter *iter = &cmd->saved_iter;
+	struct iov_iter *iter = cmd->read_iter;
 	struct scatterlist *sg;
 	struct page *page;
 	size_t len;
@@ -624,7 +625,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 			cmd, se_cmd->residual_count, se_cmd->scsi_status);
 		memset(&v_rsp, 0, sizeof(v_rsp));
 
-		if (cmd->saved_iter_addr && vhost_scsi_copy_sgl_to_iov(cmd)) {
+		if (cmd->read_iter && vhost_scsi_copy_sgl_to_iov(cmd)) {
 			v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
 		} else {
 			v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
@@ -808,10 +809,15 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 	int i, ret;
 
 	if (data_dir == DMA_FROM_DEVICE) {
-		cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
-						GFP_KERNEL);
-		if (!cmd->saved_iter_addr)
+		cmd->read_iter = kzalloc(sizeof(*cmd->read_iter), GFP_KERNEL);
+		if (!cmd->read_iter)
 			return -ENOMEM;
+
+		cmd->read_iov = dup_iter(cmd->read_iter, iter, GFP_KERNEL);
+		if (!cmd->read_iov) {
+			ret = -ENOMEM;
+			goto free_iter;
+		}
 	}
 
 	for_each_sgtable_sg(sg_table, sg, i) {
@@ -845,7 +851,9 @@ err:
 		if (page)
 			__free_page(page);
 	}
-	kfree(cmd->saved_iter_addr);
+	kfree(cmd->read_iov);
+free_iter:
+	kfree(cmd->read_iter);
 	return ret;
 }

From 9d8960672d63db4b3b04542f5622748b345c637a Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Tue, 3 Dec 2024 13:15:15 -0600
Subject: [PATCH 17/17] vhost-scsi: Reduce response iov mem use

We have to save N iov entries to copy the virtio_scsi_cmd_resp struct
back to the guest's buffer. The difficulty is that we can't assume the
virtio_scsi_cmd_resp will be in 1 iov, because older virtio specs
allowed you to break it up. The worst case is a guest that breaks the
virtio_scsi_cmd_resp struct up into 108 single-byte vecs (the struct
is 108 bytes) like:

iov[0].iov_base = &((unsigned char *)virtio_scsi_cmd_resp)[0]
iov[0].iov_len = 1
iov[1].iov_base = &((unsigned char *)virtio_scsi_cmd_resp)[1]
iov[1].iov_len = 1
...
iov[107].iov_base = &((unsigned char *)virtio_scsi_cmd_resp)[107]
iov[107].iov_len = 1

Right now we allocate UIO_MAXIOV vecs, which is 1024, so for a small
device with just 1 queue and 128 commands per queue we are wasting:

1.8 MB = (1024 current entries - 108) * 16 bytes per entry * 128 cmds
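As a sanity check on that figure: struct iovec is 16 bytes on a 64-bit
build (a pointer plus a length), and the worst case after this patch
still needs one iovec per byte of the 108-byte response, so the
per-command saving is the 1024 - 108 entries that no longer have to be
preallocated:

    (1024 - 108) entries * 16 bytes * 128 cmds = 1,875,968 bytes ~= 1.8 MB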
The most common case is going to be where the initiator puts the
entire virtio_scsi_cmd_resp in the first iov and does not split it.
We've always done it this way for Linux, and the Windows driver looks
like it has always done the same.

It's highly unlikely anyone has ever split the response, and if they
did it might just be a case where the sense data sits in a second iov,
but even that seems unlikely.

So, to optimize for the common implementation, this has us
pre-allocate only the single iovec. If we do hit the split-up response
case, this has us allocate the iovecs needed at that time.

Signed-off-by: Mike Christie
Message-Id: <20241203191705.19431-9-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin
Acked-by: Stefan Hajnoczi
---
 drivers/vhost/scsi.c | 82 ++++++++++++++++++++++++++++++++------------
 1 file changed, 60 insertions(+), 22 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 82389e2ac0c9..f6f5a7ac7894 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -45,6 +45,11 @@
 #define VHOST_SCSI_PREALLOC_SGLS 2048
 #define VHOST_SCSI_PREALLOC_UPAGES 2048
 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
+/*
+ * For the legacy descriptor case we allocate an iov per byte in the
+ * virtio_scsi_cmd_resp struct.
+ */
+#define VHOST_SCSI_MAX_RESP_IOVS	sizeof(struct virtio_scsi_cmd_resp)
 
 static unsigned int vhost_scsi_inline_sg_cnt = VHOST_SCSI_PREALLOC_SGLS;
 
@@ -106,8 +111,6 @@ struct vhost_scsi_inflight {
 struct vhost_scsi_cmd {
 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
 	int tvc_vq_desc;
-	/* virtio-scsi response incoming iovecs */
-	int tvc_in_iovs;
 	/* The number of scatterlists associated with this cmd */
 	u32 tvc_sgl_count;
 	u32 tvc_prot_sgl_count;
@@ -118,8 +121,12 @@ struct vhost_scsi_cmd {
 	struct sg_table table;
 	struct scatterlist *prot_sgl;
 	struct sg_table prot_table;
-	/* Pointer to response header iovec */
-	struct iovec *tvc_resp_iov;
+	/* Fast path response header iovec used when only one vec is needed */
+	struct iovec tvc_resp_iov;
+	/* Number of iovs for response */
+	unsigned int tvc_resp_iovs_cnt;
+	/* Pointer to response header iovecs if more than one is needed */
+	struct iovec *tvc_resp_iovs;
 	/* Pointer to vhost_virtqueue for the cmd */
 	struct vhost_virtqueue *tvc_vq;
 	/* The TCM I/O descriptor that is accessed via container_of() */
@@ -391,6 +398,8 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 		sg_free_table_chained(&tv_cmd->prot_table, vs->inline_sg_cnt);
 	}
 
+	if (tv_cmd->tvc_resp_iovs != &tv_cmd->tvc_resp_iov)
+		kfree(tv_cmd->tvc_resp_iovs);
 	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
 	vhost_scsi_put_inflight(inflight);
 }
@@ -638,8 +647,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 					 se_cmd->scsi_sense_length);
 		}
 
-		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
-			      cmd->tvc_in_iovs, sizeof(v_rsp));
+		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iovs,
+			      cmd->tvc_resp_iovs_cnt, sizeof(v_rsp));
 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
 		if (likely(ret == sizeof(v_rsp))) {
 			signal = true;
@@ -662,7 +671,6 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
 						struct vhost_scsi_virtqueue, vq);
 	struct vhost_scsi_cmd *cmd;
 	struct scatterlist *sgl, *prot_sgl;
-	struct iovec *tvc_resp_iov;
 	int tag;
 
 	tag = sbitmap_get(&svq->scsi_tags);
@@ -674,13 +682,11 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
 	cmd = &svq->scsi_cmds[tag];
 	sgl = cmd->sgl;
 	prot_sgl = cmd->prot_sgl;
-	tvc_resp_iov = cmd->tvc_resp_iov;
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->sgl = sgl;
 	cmd->prot_sgl = prot_sgl;
 	cmd->tvc_se_cmd.map_tag = tag;
 	cmd->inflight = vhost_scsi_get_inflight(vq);
-	cmd->tvc_resp_iov = tvc_resp_iov;
 
 	return cmd;
 }
@@ -1124,6 +1130,43 @@ out:
 	return ret;
 }
 
+static int
+vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs,
+			   unsigned int in_iovs_cnt)
+{
+	int i, cnt;
+
+	if (!in_iovs_cnt)
+		return 0;
+	/*
+	 * Initiators normally just put the virtio_scsi_cmd_resp in the first
+	 * iov, but just in case they wedged in some data with it we check for
+	 * greater than or equal to the response struct.
+	 */
+	if (in_iovs[0].iov_len >= sizeof(struct virtio_scsi_cmd_resp)) {
+		cmd->tvc_resp_iovs = &cmd->tvc_resp_iov;
+		cmd->tvc_resp_iovs_cnt = 1;
+	} else {
+		/*
+		 * Legacy descriptor layouts didn't specify that we must put
+		 * the entire response in one iov. Worst case we have an
+		 * iov per byte.
+		 */
+		cnt = min(VHOST_SCSI_MAX_RESP_IOVS, in_iovs_cnt);
+		cmd->tvc_resp_iovs = kcalloc(cnt, sizeof(struct iovec),
+					     GFP_KERNEL);
+		if (!cmd->tvc_resp_iovs)
+			return -ENOMEM;
+
+		cmd->tvc_resp_iovs_cnt = cnt;
+	}
+
+	for (i = 0; i < cmd->tvc_resp_iovs_cnt; i++)
+		cmd->tvc_resp_iovs[i] = in_iovs[i];
+
+	return 0;
+}
+
 static u16 vhost_buf_to_lun(u8 *lun_buf)
 {
 	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
@@ -1141,7 +1184,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	struct iov_iter in_iter, prot_iter, data_iter;
 	u64 tag;
 	u32 exp_data_len, data_direction;
-	int ret, prot_bytes, i, c = 0;
+	int ret, prot_bytes, c = 0;
 	u16 lun;
 	u8 task_attr;
 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
@@ -1303,9 +1346,13 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			goto err;
 		}
 		cmd->tvc_vq = vq;
-		for (i = 0; i < vc.in ; i++)
-			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
-		cmd->tvc_in_iovs = vc.in;
+
+		ret = vhost_scsi_setup_resp_iovs(cmd, &vq->iov[vc.out], vc.in);
+		if (ret) {
+			vq_err(vq, "Failed to alloc recv iovs\n");
+			vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+			goto err;
+		}
 
 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
 			 cdb[0], lun);
@@ -1686,7 +1733,6 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
 
 		kfree(tv_cmd->sgl);
 		kfree(tv_cmd->prot_sgl);
-		kfree(tv_cmd->tvc_resp_iov);
 	}
 
 	sbitmap_free(&svq->scsi_tags);
@@ -1735,14 +1781,6 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
 			}
 		}
 
-		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
-					       sizeof(struct iovec),
-					       GFP_KERNEL);
-		if (!tv_cmd->tvc_resp_iov) {
-			pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
-			goto out;
-		}
-
 		if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) &&
 		    vs->inline_sg_cnt) {
 			tv_cmd->prot_sgl = kcalloc(vs->inline_sg_cnt,