Commit a54d8066 authored by Maciej S. Szmigiero, committed by Paolo Bonzini

KVM: Keep memslots in tree-based structures instead of array-based ones



The current memslot code uses a (reverse gfn-ordered) memslot array for
keeping track of them.

Because the memslot array that is currently in use cannot be modified,
every memslot management operation (create, delete, move, change flags)
has to make a copy of the whole array so it has a scratch copy to work on.

Strictly speaking, however, it is only necessary to make copy of the
memslot that is being modified, copying all the memslots currently present
is just a limitation of the array-based memslot implementation.

Two memslot sets, however, are still needed so the VM continues to run
on the currently active set while the requested operation is being
performed on the second, currently inactive one.

In order to have two memslot sets, but only one copy of actual memslots
it is necessary to split out the memslot data from the memslot sets.

The memslots themselves should be also kept independent of each other
so they can be individually added or deleted.

These two memslot sets should normally point to the same set of
memslots. They can, however, be desynchronized when performing a
memslot management operation by replacing the memslot to be modified
by its copy.  After the operation is complete, both memslot sets once
again point to the same, common set of memslot data.

This commit implements the aforementioned idea.

For tracking of gfns an ordinary rbtree is used since memslots cannot
overlap in the guest address space and so this data structure is
sufficient for ensuring that lookups are done quickly.

The "last used slot" mini-caches (both per-slot set one and per-vCPU one),
that keep track of the last found-by-gfn memslot, are still present in the
new code.

Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <17c0cf3663b760a0d3753d4ac08c0753e941b811.1638817641.git.maciej.szmigiero@oracle.com>
parent 6a656832
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -210,13 +210,13 @@ static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;
	int idx, bkt;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
	kvm_for_each_memslot(memslot, bkt, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
@@ -595,14 +595,14 @@ void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;
	int idx, bkt;

	idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(current->mm);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
	kvm_for_each_memslot(memslot, bkt, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
+2 −2
Original line number Diff line number Diff line
@@ -734,11 +734,11 @@ void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;
	int srcu_idx, bkt;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
	kvm_for_each_memslot(memslot, bkt, slots) {
		/* Mutual exclusion with kvm_unmap_hva_range etc. */
		spin_lock(&kvm->mmu_lock);
		/*
+2 −1
Original line number Diff line number Diff line
@@ -5880,11 +5880,12 @@ static int kvmhv_svm_off(struct kvm *kvm)
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memory_slot *memslot;
		struct kvm_memslots *slots = __kvm_memslots(kvm, i);
		int bkt;

		if (!slots)
			continue;

		kvm_for_each_memslot(memslot, slots) {
		kvm_for_each_memslot(memslot, bkt, slots) {
			kvmppc_uvmem_drop_pages(memslot, kvm, true);
			uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
		}
+2 −2
Original line number Diff line number Diff line
@@ -749,7 +749,7 @@ void kvmhv_release_all_nested(struct kvm *kvm)
	struct kvm_nested_guest *gp;
	struct kvm_nested_guest *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int srcu_idx;
	int srcu_idx, bkt;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
@@ -770,7 +770,7 @@ void kvmhv_release_all_nested(struct kvm *kvm)
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}
+7 −7
Original line number Diff line number Diff line
@@ -459,7 +459,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot, *m;
	int ret = H_SUCCESS;
	int srcu_idx;
	int srcu_idx, bkt;

	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

@@ -478,7 +478,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)

	/* register the memslot */
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
	kvm_for_each_memslot(memslot, bkt, slots) {
		ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
		if (ret)
			break;
@@ -486,7 +486,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)

	if (ret) {
		slots = kvm_memslots(kvm);
		kvm_for_each_memslot(m, slots) {
		kvm_for_each_memslot(m, bkt, slots) {
			if (m == memslot)
				break;
			__kvmppc_uvmem_memslot_delete(kvm, memslot);
@@ -647,7 +647,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,

unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
	int srcu_idx;
	int srcu_idx, bkt;
	struct kvm_memory_slot *memslot;

	/*
@@ -662,7 +662,7 @@ unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)

	srcu_idx = srcu_read_lock(&kvm->srcu);

	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
		kvmppc_uvmem_drop_pages(memslot, kvm, false);

	srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -821,7 +821,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;
	int srcu_idx, bkt;
	long ret = H_SUCCESS;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
@@ -830,7 +830,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
	/* migrate any unmoved normal pfn to device pfns*/
	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
	kvm_for_each_memslot(memslot, bkt, slots) {
		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
		if (ret) {
			/*
Loading