Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/Kconfig        14
-rw-r--r--  virt/kvm/dirty_ring.c    4
-rw-r--r--  virt/kvm/kvm_main.c      9
-rw-r--r--  virt/kvm/vfio.c         45
4 files changed, 56 insertions(+), 16 deletions(-)
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index a8c5c9f06b3c..800f9470e36b 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -19,6 +19,20 @@ config HAVE_KVM_IRQ_ROUTING
config HAVE_KVM_DIRTY_RING
bool
+# Only strongly ordered architectures can select this, as it doesn't
+# put any explicit constraint on userspace ordering. They can also
+# select the _ACQ_REL version.
+config HAVE_KVM_DIRTY_RING_TSO
+ bool
+ select HAVE_KVM_DIRTY_RING
+ depends on X86
+
+# Weakly ordered architectures can only select this, advertising
+# to userspace the additional ordering requirements.
+config HAVE_KVM_DIRTY_RING_ACQ_REL
+ bool
+ select HAVE_KVM_DIRTY_RING
+
config HAVE_KVM_EVENTFD
bool
select EVENTFD
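
For orientation (not part of this diff): both options gate the same ring layout and differ only in the ordering contract advertised to userspace. The ring entries the contract covers are the UAPI kvm_dirty_gfn records, quoted here from include/uapi/linux/kvm.h; the sketches further down assume this layout:

struct kvm_dirty_gfn {
        __u32 flags;
        __u32 slot;
        __u64 offset;
};

#define KVM_DIRTY_GFN_F_DIRTY           BIT(0)
#define KVM_DIRTY_GFN_F_RESET           BIT(1)
#define KVM_DIRTY_GFN_F_MASK            0x3
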
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index f4c2a6eb1666..d6fabf238032 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -74,7 +74,7 @@ int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
- gfn->flags = 0;
+ smp_store_release(&gfn->flags, 0);
}
static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
@@ -84,7 +84,7 @@ static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
- return gfn->flags & KVM_DIRTY_GFN_F_RESET;
+ return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
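
The kernel's store-release above (marking a slot reusable) and load-acquire (checking for harvested entries) pair with matching accesses in the VMM. A minimal sketch of the userspace half, assuming the kvm_dirty_gfn definitions quoted above are in scope and using a hypothetical mark_page_dirty_in_vmm() helper (illustrative only, not part of this patch):

#include <stdint.h>

void mark_page_dirty_in_vmm(uint32_t slot, uint64_t offset);   /* hypothetical */

/*
 * Harvest one entry from the mmap'ed per-vcpu dirty ring of `size`
 * entries, with `*next` as a free-running reader index.  Returns 1 if
 * an entry was consumed, 0 if the ring is drained.
 */
static int harvest_one(struct kvm_dirty_gfn *ring, uint32_t *next, uint32_t size)
{
        struct kvm_dirty_gfn *e = &ring[*next % size];

        /* Acquire pairs with the kernel's publishing store: slot/offset
         * may only be read after the DIRTY flag is observed. */
        if (!(__atomic_load_n(&e->flags, __ATOMIC_ACQUIRE) & KVM_DIRTY_GFN_F_DIRTY))
                return 0;

        mark_page_dirty_in_vmm(e->slot, e->offset);

        /* Release pairs with the smp_load_acquire() in
         * kvm_dirty_gfn_harvested(): publish the RESET flag last, and
         * let KVM_RESET_DIRTY_RINGS collect it. */
        __atomic_store_n(&e->flags, KVM_DIRTY_GFN_F_RESET, __ATOMIC_RELEASE);
        (*next)++;
        return 1;
}

On TSO architectures the plain capability's contract tolerates ordinary loads and stores here; the acquire/release variant is safe everywhere, which is why the Kconfig comment lets x86 advertise both.
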
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 26383e63d9dd..e30f1b4ecfa5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4473,7 +4473,13 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_NR_MEMSLOTS:
return KVM_USER_MEM_SLOTS;
case KVM_CAP_DIRTY_LOG_RING:
-#ifdef CONFIG_HAVE_KVM_DIRTY_RING
+#ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
+ return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
+#else
+ return 0;
+#endif
+ case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
+#ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
#else
return 0;
@@ -4578,6 +4584,7 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
return 0;
}
case KVM_CAP_DIRTY_LOG_RING:
+ case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
default:
return kvm_vm_ioctl_enable_cap(kvm, cap);
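
Userspace consequence of the plumbing above, as a sketch: a VMM would probe the ACQ_REL capability first (the one weakly ordered architectures advertise), fall back to the TSO-only capability, and enable whichever matched. This assumes headers that define KVM_CAP_DIRTY_LOG_RING_ACQ_REL and an open VM descriptor `vm_fd` (illustrative only):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_dirty_ring(int vm_fd, uint32_t ring_bytes)
{
        struct kvm_enable_cap cap = { .args = { ring_bytes } };

        if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING_ACQ_REL) > 0)
                cap.cap = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
        else if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING) > 0)
                cap.cap = KVM_CAP_DIRTY_LOG_RING;
        else
                return -1;              /* dirty ring not supported */

        /* args[0] is the ring size in bytes; KVM requires a power of two. */
        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
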
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index ce1b01d02c51..495ceabffe88 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -24,6 +24,9 @@
struct kvm_vfio_group {
struct list_head node;
struct file *file;
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ struct iommu_group *iommu_group;
+#endif
};
struct kvm_vfio {
@@ -61,6 +64,23 @@ static bool kvm_vfio_file_enforced_coherent(struct file *file)
return ret;
}
+static bool kvm_vfio_file_is_group(struct file *file)
+{
+ bool (*fn)(struct file *file);
+ bool ret;
+
+ fn = symbol_get(vfio_file_is_group);
+ if (!fn)
+ return false;
+
+ ret = fn(file);
+
+ symbol_put(vfio_file_is_group);
+
+ return ret;
+}
+
+#ifdef CONFIG_SPAPR_TCE_IOMMU
static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
{
struct iommu_group *(*fn)(struct file *file);
@@ -77,16 +97,15 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
return ret;
}
-#ifdef CONFIG_SPAPR_TCE_IOMMU
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
struct kvm_vfio_group *kvg)
{
- struct iommu_group *grp = kvm_vfio_file_iommu_group(kvg->file);
-
- if (WARN_ON_ONCE(!grp))
+ if (WARN_ON_ONCE(!kvg->iommu_group))
return;
- kvm_spapr_tce_release_iommu_group(kvm, grp);
+ kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
+ iommu_group_put(kvg->iommu_group);
+ kvg->iommu_group = NULL;
}
#endif
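
The kvm_vfio_file_is_group() wrapper added above follows the same symbol_get() convention as the existing file helpers: kvm.ko carries no link-time dependency on vfio, and the call simply fails when the vfio module is not loaded. A generic sketch of the pattern, with optional_helper() as a placeholder symbol name and the two halves living in separate modules (not from this patch):

#include <linux/fs.h>
#include <linux/module.h>

/* Provider module: the symbol must be exported for symbol_get() to resolve. */
bool optional_helper(struct file *file)
{
        return file != NULL;            /* placeholder logic */
}
EXPORT_SYMBOL_GPL(optional_helper);

/* Consumer module: resolve at call time; symbol_get() pins the provider
 * module for the duration of the call. */
static bool call_optional_helper(struct file *file)
{
        bool (*fn)(struct file *file);
        bool ret;

        fn = symbol_get(optional_helper);
        if (!fn)
                return false;           /* provider not loaded */

        ret = fn(file);
        symbol_put(optional_helper);    /* drop the module reference */
        return ret;
}
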
@@ -136,7 +155,7 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
return -EBADF;
/* Ensure the FD is a vfio group FD. */
- if (!kvm_vfio_file_iommu_group(filp)) {
+ if (!kvm_vfio_file_is_group(filp)) {
ret = -EINVAL;
goto err_fput;
}
@@ -236,19 +255,19 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
mutex_lock(&kv->lock);
list_for_each_entry(kvg, &kv->group_list, node) {
- struct iommu_group *grp;
-
if (kvg->file != f.file)
continue;
- grp = kvm_vfio_file_iommu_group(kvg->file);
- if (WARN_ON_ONCE(!grp)) {
- ret = -EIO;
- goto err_fdput;
+ if (!kvg->iommu_group) {
+ kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
+ if (WARN_ON_ONCE(!kvg->iommu_group)) {
+ ret = -EIO;
+ goto err_fdput;
+ }
}
ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
- grp);
+ kvg->iommu_group);
break;
}
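
Net effect of the hunk above: the iommu_group reference is resolved once, at SPAPR TCE attach time, cached on the kvm_vfio_group, and reused on later attaches; the release path earlier in the file then drops the cached reference instead of calling back into vfio. Reduced to its shape, with group_get()/group_put()/do_attach() as placeholder names (illustrative only):

struct iommu_group *group_get(struct file *file);       /* placeholder */
void group_put(struct iommu_group *grp);                /* placeholder */
int do_attach(int tablefd, struct iommu_group *grp);    /* placeholder */

static int attach_once(struct kvm_vfio_group *kvg, int tablefd)
{
        if (!kvg->iommu_group) {
                kvg->iommu_group = group_get(kvg->file);  /* take the reference once */
                if (!kvg->iommu_group)
                        return -EIO;
        }
        return do_attach(tablefd, kvg->iommu_group);      /* reuse it afterwards */
}

static void release_cached(struct kvm_vfio_group *kvg)
{
        if (!kvg->iommu_group)
                return;
        group_put(kvg->iommu_group);                      /* drop it exactly once */
        kvg->iommu_group = NULL;
}
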