vhost,vdpa,virtio: cleanups, fixes
A very quiet cycle, no new features.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAl+QSnEPHG1zdEByZWRo
YXQuY29tAAoJECgfDbjSjVRpvzoIAIAJPV0OTShpvv8JXmBDngDGysuAcQah+d3u
g2vDzRb9J3lYH7hJgkHans/4s3wYtWcJei7tgU2UkSODTSPK/l+hp4sTuVowsqPD
Cvp6k7/ipzJscl2AAiflSn5gBUORHXU8oxEeDvUAJbVkSwWdKvKgvDGPbVxZCU0V
kGlUctRq96e/TQCNekVthZ1Q4cgPKgx4zMFZjLSbj0yDN2JJJp+0Y+y5NJ5u9eTE
VneaFZOJxlhjmNZZP1Bu/MOcvgPbjxZjDRRUP75sv8c7IkoGiubHbbwcDhbE5gVd
Ve/ByiFTJe9ydKVVLm1O81AqO7uB13W46LjF5yotyk/dKX6s5eU=
=1Gdh
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "vhost, vdpa, and virtio cleanups and fixes

  A very quiet cycle, no new features"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  MAINTAINERS: add URL for virtio-mem
  vhost_vdpa: remove unnecessary spin_lock in vhost_vring_call
  vringh: fix __vringh_iov() when riov and wiov are different
  vdpa/mlx5: Setup driver only if VIRTIO_CONFIG_S_DRIVER_OK
  s390: virtio: PV needs VIRTIO I/O device protection
  virtio: let arch advertise guest's memory access restrictions
  vhost_vdpa: Fix duplicate included kernel.h
  vhost: reduce stack usage in log_used
  virtio-mem: Constify mem_id_table
  virtio_input: Constify id_table
  virtio-balloon: Constify id_table
  vdpa/mlx5: Fix failure to bring link up
  vdpa/mlx5: Make use of a specific 16 bit endianness API
commit 9313f80263
@@ -18629,6 +18629,7 @@ VIRTIO MEM DRIVER
 M:	David Hildenbrand <david@redhat.com>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
+W:	https://virtio-mem.gitlab.io/
 F:	drivers/virtio/virtio_mem.c
 F:	include/uapi/linux/virtio_mem.h
 
@@ -810,6 +810,7 @@ menu "Virtualization"
 config PROTECTED_VIRTUALIZATION_GUEST
 	def_bool n
 	prompt "Protected virtualization guest support"
+	select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
 	help
 	  Select this option, if you want to be able to run this
 	  kernel as a protected virtualization KVM guest.
@@ -46,6 +46,7 @@
 #include <asm/kasan.h>
 #include <asm/dma-mapping.h>
 #include <asm/uv.h>
+#include <linux/virtio_config.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 
@@ -162,6 +163,16 @@ bool force_dma_unencrypted(struct device *dev)
 	return is_prot_virt_guest();
 }
 
+#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
+
+int arch_has_restricted_virtio_memory_access(void)
+{
+	return is_prot_virt_guest();
+}
+EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);
+
+#endif
+
 /* protected virtualization */
 static void pv_init(void)
 {
@@ -1522,6 +1522,11 @@ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
 		(mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
 }
 
+static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+{
+	return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+}
+
 static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
 {
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -1535,8 +1540,8 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
 		return err;
 
 	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
-	ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev),
-					     ndev->mtu);
+	ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
+	ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
 	return err;
 }
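For context on the two mlx5 hunks above: a virtio device that negotiated VIRTIO_F_VERSION_1 always takes little-endian multi-byte config fields, while a legacy device takes them in the guest's native byte order; that is the rule mlx5_vdpa_is_little_endian() and the new cpu_to_mlx5vdpa16() helper encode. Below is a small standalone C model of that rule, illustrative only (to_virtio16() and the sample MTU value are not kernel code):

/*
 * Standalone model (not kernel code) of the virtio 16-bit byte-order rule
 * wrapped by cpu_to_mlx5vdpa16(): VERSION_1 devices use little-endian
 * fields, legacy devices use the guest's native order.
 */
#include <stdint.h>
#include <stdio.h>

static int cpu_is_little_endian(void)
{
	uint16_t probe = 1;

	return *(const uint8_t *)&probe == 1;
}

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

static uint16_t to_virtio16(int device_is_modern, uint16_t val)
{
	/* modern (VERSION_1): little-endian on the wire, so swap on a
	 * big-endian CPU; legacy: guest-native order, nothing to convert */
	if (device_is_modern && !cpu_is_little_endian())
		return swab16(val);
	return val;
}

int main(void)
{
	uint16_t mtu = 1500;

	printf("legacy device stores:    0x%04x\n", to_virtio16(0, mtu));
	printf("VERSION_1 device stores: 0x%04x\n", to_virtio16(1, mtu));
	return 0;
}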
@@ -1653,6 +1658,9 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *
 	if (err)
 		goto err_mr;
 
+	if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
+		return 0;
+
 	restore_channels_info(ndev);
 	err = setup_driver(ndev);
 	if (err)
@@ -22,7 +22,6 @@
 #include <linux/nospec.h>
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
-#include <linux/kernel.h>
 
 #include "vhost.h"
 
@@ -97,26 +96,20 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
 		return;
 
 	irq = ops->get_vq_irq(vdpa, qid);
-	spin_lock(&vq->call_ctx.ctx_lock);
 	irq_bypass_unregister_producer(&vq->call_ctx.producer);
-	if (!vq->call_ctx.ctx || irq < 0) {
-		spin_unlock(&vq->call_ctx.ctx_lock);
+	if (!vq->call_ctx.ctx || irq < 0)
 		return;
-	}
 
 	vq->call_ctx.producer.token = vq->call_ctx.ctx;
 	vq->call_ctx.producer.irq = irq;
 	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
-	spin_unlock(&vq->call_ctx.ctx_lock);
 }
 
 static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
 {
 	struct vhost_virtqueue *vq = &v->vqs[qid];
 
-	spin_lock(&vq->call_ctx.ctx_lock);
 	irq_bypass_unregister_producer(&vq->call_ctx.producer);
-	spin_unlock(&vq->call_ctx.ctx_lock);
 }
 
 static void vhost_vdpa_reset(struct vhost_vdpa *v)
@@ -302,7 +302,6 @@ static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
 {
 	call_ctx->ctx = NULL;
 	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
-	spin_lock_init(&call_ctx->ctx_lock);
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -1650,9 +1649,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 			break;
 		}
 
-		spin_lock(&vq->call_ctx.ctx_lock);
 		swap(ctx, vq->call_ctx.ctx);
-		spin_unlock(&vq->call_ctx.ctx_lock);
 		break;
 	case VHOST_SET_VRING_ERR:
 		if (copy_from_user(&f, argp, sizeof f)) {
@@ -1897,7 +1894,7 @@ static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
 
 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
 {
-	struct iovec iov[64];
+	struct iovec *iov = vq->log_iov;
 	int i, ret;
 
 	if (!vq->iotlb)
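For context on the hunk above ("vhost: reduce stack usage in log_used"): the on-stack struct iovec iov[64] is replaced by a pointer into the per-virtqueue log_iov[] array added to struct vhost_virtqueue in a later hunk. A quick userspace check of the size involved, assuming the usual 64-bit struct iovec layout; this is illustrative, not kernel code:

/* Size check for the array moved off the stack; assumes the common 64-bit
 * struct iovec layout (iov_base pointer + iov_len size_t = 16 bytes). */
#include <stdio.h>
#include <sys/uio.h>

int main(void)
{
	/* mirrors the old declaration in log_used(): struct iovec iov[64]; */
	printf("struct iovec iov[64] = %zu bytes of stack per call\n",
	       sizeof(struct iovec[64]));
	return 0;
}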
@@ -64,7 +64,6 @@ enum vhost_uaddr_type {
 struct vhost_vring_call {
 	struct eventfd_ctx *ctx;
 	struct irq_bypass_producer producer;
-	spinlock_t ctx_lock;
 };
 
 /* The virtqueue structure describes a queue attached to a device. */
@@ -123,6 +122,7 @@ struct vhost_virtqueue {
 	/* Log write descriptors */
 	void __user *log_base;
 	struct vhost_log *log;
+	struct iovec log_iov[64];
 
 	/* Ring endianness. Defaults to legacy native endianness.
 	 * Set to true when starting a modern virtio device. */
@@ -284,13 +284,14 @@ __vringh_iov(struct vringh *vrh, u16 i,
 	desc_max = vrh->vring.num;
 	up_next = -1;
 
+	/* You must want something! */
+	if (WARN_ON(!riov && !wiov))
+		return -EINVAL;
+
 	if (riov)
 		riov->i = riov->used = 0;
-	else if (wiov)
+	if (wiov)
 		wiov->i = wiov->used = 0;
-	else
-		/* You must want something! */
-		BUG();
 
 	for (;;) {
 		void *addr;
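To see why the old "else if" was wrong when a caller passes distinct riov and wiov, here is a minimal standalone reproduction of the two control flows (struct kiov and the sample values are illustrative stand-ins for vringh_kiov, not kernel code):

/* Minimal standalone illustration of the initialization bug fixed above:
 * with the old "else if", passing both a read and a write iov left the
 * write side's indices stale. */
#include <assert.h>

struct kiov {
	unsigned int i;
	unsigned int used;
};

static void init_old(struct kiov *riov, struct kiov *wiov)
{
	if (riov)
		riov->i = riov->used = 0;
	else if (wiov)		/* skipped whenever riov is non-NULL */
		wiov->i = wiov->used = 0;
}

static void init_new(struct kiov *riov, struct kiov *wiov)
{
	if (riov)
		riov->i = riov->used = 0;
	if (wiov)		/* now reset independently of riov */
		wiov->i = wiov->used = 0;
}

int main(void)
{
	struct kiov r = { 3, 3 }, w = { 5, 5 };

	init_old(&r, &w);
	assert(w.used == 5);	/* stale: write side never reset */

	init_new(&r, &w);
	assert(w.used == 0);	/* fixed: both sides reset */
	return 0;
}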
@@ -6,6 +6,12 @@ config VIRTIO
 	  bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG
 	  or CONFIG_S390_GUEST.
 
+config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
+	bool
+	help
+	  This option is selected if the architecture may need to enforce
+	  VIRTIO_F_ACCESS_PLATFORM
+
 menuconfig VIRTIO_MENU
 	bool "Virtio drivers"
 	default y
@@ -176,6 +176,21 @@ int virtio_finalize_features(struct virtio_device *dev)
 	if (ret)
 		return ret;
 
+	ret = arch_has_restricted_virtio_memory_access();
+	if (ret) {
+		if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
+			dev_warn(&dev->dev,
+				 "device must provide VIRTIO_F_VERSION_1\n");
+			return -ENODEV;
+		}
+
+		if (!virtio_has_feature(dev, VIRTIO_F_ACCESS_PLATFORM)) {
+			dev_warn(&dev->dev,
+				 "device must provide VIRTIO_F_ACCESS_PLATFORM\n");
+			return -ENODEV;
+		}
+	}
+
 	if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1))
 		return 0;
 
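A standalone model of the gate added above: when the architecture reports restricted guest memory access (the hook added in the s390 and virtio_config.h hunks of this series), a device must offer both VIRTIO_F_VERSION_1 and VIRTIO_F_ACCESS_PLATFORM or the probe fails. The function names and the hard-coded "restricted" answer below are simplified stand-ins, not the kernel API:

/*
 * Standalone model (not kernel code) of the feature gate added to
 * virtio_finalize_features(). Feature bit numbers follow the virtio
 * spec (VERSION_1 = 32, ACCESS_PLATFORM = 33).
 */
#include <stdbool.h>
#include <stdio.h>

#define F_VERSION_1       (1ull << 32)
#define F_ACCESS_PLATFORM (1ull << 33)

/* stands in for arch_has_restricted_virtio_memory_access(); pretend we
 * run as a protected guest, e.g. an s390 PV guest */
static bool guest_memory_is_restricted(void)
{
	return true;
}

static int check_device_features(unsigned long long features)
{
	if (!guest_memory_is_restricted())
		return 0;
	if (!(features & F_VERSION_1) || !(features & F_ACCESS_PLATFORM))
		return -1;	/* kernel warns and returns -ENODEV here */
	return 0;
}

int main(void)
{
	printf("legacy device:                      %s\n",
	       check_device_features(0) ? "rejected" : "accepted");
	printf("VERSION_1 + ACCESS_PLATFORM device: %s\n",
	       check_device_features(F_VERSION_1 | F_ACCESS_PLATFORM) ?
	       "rejected" : "accepted");
	return 0;
}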
@@ -128,7 +128,7 @@ struct virtio_balloon {
 	struct page_reporting_dev_info pr_dev_info;
 };
 
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
 	{ 0 },
 };
@@ -363,7 +363,7 @@ static int virtinput_restore(struct virtio_device *vdev)
 static unsigned int features[] = {
 	/* none */
 };
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_INPUT, VIRTIO_DEV_ANY_ID },
 	{ 0 },
 };
@@ -1927,7 +1927,7 @@ static unsigned int virtio_mem_features[] = {
 #endif
 };
 
-static struct virtio_device_id virtio_mem_id_table[] = {
+static const struct virtio_device_id virtio_mem_id_table[] = {
 	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
 	{ 0 },
 };
@@ -557,4 +557,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev,
 		virtio_cread_le((vdev), structname, member, ptr);	\
 	_r;								\
 })
+
+#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
+int arch_has_restricted_virtio_memory_access(void);
+#else
+static inline int arch_has_restricted_virtio_memory_access(void)
+{
+	return 0;
+}
+#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */
+
 #endif /* _LINUX_VIRTIO_CONFIG_H */