author	Peter Xu <peterx@redhat.com>	2020-09-30 21:22:22 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2020-11-15 09:49:15 -0500
commit	fb04a1eddb1a65b6588a021bdc132270d5ae48bb (patch)
tree	0faceb84959c8f1b65f91d51c1bcc0c0a38ece43 /virt/kvm/kvm_main.c
parent	28bd726aa404c0da8fd6852fe69bb4538a103b71 (diff)
KVM: X86: Implement ring-based dirty memory tracking
This patch is heavily based on previous work from Lei Cao <lei.cao@stratus.com> and Paolo Bonzini <pbonzini@redhat.com>. [1]

KVM currently uses large bitmaps to track dirty memory. These bitmaps are copied to userspace when userspace queries KVM for its dirty page information. The use of bitmaps is mostly sufficient for live migration, as large parts of memory are dirtied from one log-dirty pass to another. However, in a checkpointing system, the number of dirty pages is small and in fact it is often bounded---the VM is paused when it has dirtied a pre-defined number of pages. Traversing a large, sparsely populated bitmap to find set bits is time-consuming, as is copying the bitmap to user-space.

A similar issue arises for live migration when the guest memory is huge while the page dirtying is light: each dirty sync still has to pull the whole dirty bitmap to userspace and analyse every bit, even though most of them are zeros.

The preferred data structure for the above scenarios is a dense list of guest frame numbers (GFNs). This patch series stores the dirty list in kernel memory that can be memory-mapped into userspace to allow speedy harvesting.

This patch enables the dirty ring for X86 only. However, it should be easy to extend it to other archs as well.

[1] https://patchwork.kernel.org/patch/10471409/

Signed-off-by: Lei Cao <lei.cao@stratus.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20201001012222.5767-1-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
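A minimal userspace-side sketch of the data-structure argument above, assuming the ring entry layout (flags/slot/offset) that this series exposes; the helper names and loop bounds are hypothetical and only illustrate that harvesting a dense GFN ring costs work proportional to the number of dirty pages, while scanning a bitmap costs work proportional to guest memory size:

#include <stdint.h>
#include <stddef.h>

/* One ring entry, mirroring the flags/slot/offset layout this series exposes. */
struct dirty_gfn {
	uint32_t flags;
	uint32_t slot;    /* (as_id << 16) | slot_id, as pushed by the kernel */
	uint64_t offset;  /* dirty page offset within that memslot */
};

/* Bitmap harvest: work is proportional to guest memory size, dirty or not. */
static size_t harvest_bitmap(const uint64_t *bitmap, size_t nbits,
			     uint64_t *out, size_t out_max)
{
	size_t i, n = 0;

	for (i = 0; i < nbits && n < out_max; i++)
		if (bitmap[i / 64] & (1ULL << (i % 64)))
			out[n++] = i;
	return n;
}

/* Ring harvest: work is proportional to the number of dirty pages only. */
static size_t harvest_ring(const struct dirty_gfn *ring, uint32_t nentries,
			   uint32_t head, uint32_t tail,
			   uint64_t *out, size_t out_max)
{
	size_t n = 0;
	uint32_t i;

	for (i = head; i != tail && n < out_max; i = (i + 1) & (nentries - 1))
		out[n++] = ring[i].offset;
	return n;
}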
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	113
1 file changed, 112 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 68598fdba226..78ef414512bf 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -63,6 +63,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
+#include <linux/kvm_dirty_ring.h>
+
/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12
@@ -415,6 +417,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
+ kvm_dirty_ring_free(&vcpu->dirty_ring);
kvm_arch_vcpu_destroy(vcpu);
/*
@@ -2644,8 +2647,13 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
{
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
+ u32 slot = (memslot->as_id << 16) | memslot->id;
- set_bit_le(rel_gfn, memslot->dirty_bitmap);
+ if (kvm->dirty_ring_size)
+ kvm_dirty_ring_push(kvm_dirty_ring_get(kvm),
+ slot, rel_gfn);
+ else
+ set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
@@ -3005,6 +3013,17 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
+static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
+{
+#if KVM_DIRTY_LOG_PAGE_OFFSET > 0
+ return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
+ (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
+ kvm->dirty_ring_size / PAGE_SIZE);
+#else
+ return false;
+#endif
+}
+
static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
{
struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
@@ -3020,6 +3039,10 @@ static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
+ else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
+ page = kvm_dirty_ring_get_page(
+ &vcpu->dirty_ring,
+ vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
else
return kvm_arch_vcpu_fault(vcpu, vmf);
get_page(page);
@@ -3033,6 +3056,14 @@ static const struct vm_operations_struct kvm_vcpu_vm_ops = {
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct kvm_vcpu *vcpu = file->private_data;
+ unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+ if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
+ kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
+ ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
+ return -EINVAL;
+
vma->vm_ops = &kvm_vcpu_vm_ops;
return 0;
}
@@ -3126,6 +3157,13 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
if (r)
goto vcpu_free_run_page;
+ if (kvm->dirty_ring_size) {
+ r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
+ id, kvm->dirty_ring_size);
+ if (r)
+ goto arch_vcpu_destroy;
+ }
+
mutex_lock(&kvm->lock);
if (kvm_get_vcpu_by_id(kvm, id)) {
r = -EEXIST;
@@ -3159,6 +3197,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
+ kvm_dirty_ring_free(&vcpu->dirty_ring);
+arch_vcpu_destroy:
kvm_arch_vcpu_destroy(vcpu);
vcpu_free_run_page:
free_page((unsigned long)vcpu->run);
@@ -3631,12 +3671,78 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#endif
case KVM_CAP_NR_MEMSLOTS:
return KVM_USER_MEM_SLOTS;
+ case KVM_CAP_DIRTY_LOG_RING:
+#if KVM_DIRTY_LOG_PAGE_OFFSET > 0
+ return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
+#else
+ return 0;
+#endif
default:
break;
}
return kvm_vm_ioctl_check_extension(kvm, arg);
}
+static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
+{
+ int r;
+
+ if (!KVM_DIRTY_LOG_PAGE_OFFSET)
+ return -EINVAL;
+
+ /* the size should be a power of 2 */
+ if (!size || (size & (size - 1)))
+ return -EINVAL;
+
+ /* Must be big enough to hold the reserved entries, and at least a page */
+ if (size < kvm_dirty_ring_get_rsvd_entries() *
+ sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
+ return -EINVAL;
+
+ if (size > KVM_DIRTY_RING_MAX_ENTRIES *
+ sizeof(struct kvm_dirty_gfn))
+ return -E2BIG;
+
+ /* We only allow it to be set once */
+ if (kvm->dirty_ring_size)
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+
+ if (kvm->created_vcpus) {
+ /* We don't allow changing this value after vCPUs are created */
+ r = -EINVAL;
+ } else {
+ kvm->dirty_ring_size = size;
+ r = 0;
+ }
+
+ mutex_unlock(&kvm->lock);
+ return r;
+}
+
+static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+ int cleared = 0;
+
+ if (!kvm->dirty_ring_size)
+ return -EINVAL;
+
+ mutex_lock(&kvm->slots_lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
+
+ mutex_unlock(&kvm->slots_lock);
+
+ if (cleared)
+ kvm_flush_remote_tlbs(kvm);
+
+ return cleared;
+}
+
int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
@@ -3667,6 +3773,8 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
kvm->max_halt_poll_ns = cap->args[0];
return 0;
}
+ case KVM_CAP_DIRTY_LOG_RING:
+ return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
default:
return kvm_vm_ioctl_enable_cap(kvm, cap);
}
@@ -3851,6 +3959,9 @@ static long kvm_vm_ioctl(struct file *filp,
case KVM_CHECK_EXTENSION:
r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
break;
+ case KVM_RESET_DIRTY_RINGS:
+ r = kvm_vm_ioctl_reset_dirty_pages(kvm);
+ break;
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}