arch/arm64/kvm/hyp/include/nvhe/gfp.h (+1 −0)

@@ -24,6 +24,7 @@ struct hyp_pool {

 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
 void hyp_get_page(struct hyp_pool *pool, void *addr);
 void hyp_put_page(struct hyp_pool *pool, void *addr);
arch/arm64/kvm/hyp/nvhe/mem_protect.c (+12 −1)

@@ -35,7 +35,18 @@ const u8 pkvm_hyp_id = 1;

 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-	return hyp_alloc_pages(&host_s2_pool, get_order(size));
+	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+	hyp_split_page(hyp_virt_to_page(addr));
+
+	/*
+	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+	 * so there should be no need to free any of the tail pages to make the
+	 * allocation exact.
+	 */
+	WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+	return addr;
 }

 static void *host_s2_zalloc_page(void *pool)
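Since get_order() rounds the requested size up to the next power-of-two number of pages, the WARN_ON() above only stays silent when the allocation was already exact. A minimal user-space sketch of that rounding check, assuming 4 KiB pages and a simplified loop-based get_order() (the kernel's is fls()-based, but agrees for the sizes shown):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Smallest order whose block covers 'size'. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { PAGE_SIZE, 2 * PAGE_SIZE, 3 * PAGE_SIZE };

	for (int i = 0; i < 3; i++) {
		unsigned int order = get_order(sizes[i]);

		/* 3 * PAGE_SIZE rounds up to order 2, so exact=0. */
		printf("size=%lu order=%u exact=%d\n", sizes[i], order,
		       sizes[i] == (PAGE_SIZE << order));
	}
	return 0;
}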
arch/arm64/kvm/hyp/nvhe/page_alloc.c (+15 −0)

@@ -152,6 +152,7 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)

 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
 {
+	BUG_ON(!p->refcount);
 	p->refcount--;
 	return (p->refcount == 0);
 }

@@ -193,6 +194,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
 	hyp_spin_unlock(&pool->lock);
 }

+void hyp_split_page(struct hyp_page *p)
+{
+	unsigned short order = p->order;
+	unsigned int i;
+
+	p->order = 0;
+	for (i = 1; i < (1 << order); i++) {
+		struct hyp_page *tail = p + i;
+
+		tail->order = 0;
+		hyp_set_page_refcounted(tail);
+	}
+}
+
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
 {
 	unsigned short i = order;
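The new hyp_split_page() turns one order-N block into 2^N independently refcounted order-0 pages, which is what lets host_s2_zalloc_pages_exact() hand out pieces of the PGD allocation one page at a time. Here is a self-contained user-space model, assuming a stripped-down struct hyp_page with only the refcount and order fields (the real structure lives in the hypervisor's vmemmap) and modelling hyp_set_page_refcounted() as "must currently be free, set refcount to 1":

#include <assert.h>
#include <stdio.h>

struct hyp_page {
	unsigned short refcount;
	unsigned short order;
};

static void hyp_set_page_refcounted(struct hyp_page *p)
{
	assert(!p->refcount);	/* tail pages of an allocated block are free */
	p->refcount = 1;
}

static void hyp_split_page(struct hyp_page *p)
{
	unsigned short order = p->order;
	unsigned int i;

	p->order = 0;
	for (i = 1; i < (1 << order); i++) {
		struct hyp_page *tail = p + i;

		tail->order = 0;
		hyp_set_page_refcounted(tail);
	}
}

int main(void)
{
	/* An order-2 block: head page holds the refcount, tails are free. */
	struct hyp_page pages[4] = { { .refcount = 1, .order = 2 } };

	hyp_split_page(&pages[0]);
	/* All four pages are now order-0 with refcount 1. */
	for (int i = 0; i < 4; i++)
		printf("page %d: order=%hu refcount=%hu\n",
		       i, pages[i].order, pages[i].refcount);
	return 0;
}

After the split, each page can be released individually with hyp_put_page(), and the new BUG_ON() in hyp_page_ref_dec_and_test() catches any attempt to drop the refcount of a page that is already free.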
arch/arm64/kvm/mmu.c (+4 −2)

@@ -1529,8 +1529,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		 * when updating the PG_mte_tagged page flag, see
 		 * sanitise_mte_tags for more details.
 		 */
-		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
-			return -EINVAL;
+		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+			ret = -EINVAL;
+			break;
+		}

 		if (vma->vm_flags & VM_PFNMAP) {
 			/* IO region dirty page logging not allowed */
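This check runs inside a VMA walk performed with the mmap read lock held; returning directly skipped the unlock on the function's common exit path, so the fix records the error in ret and breaks out of the loop instead. A sketch of the pattern, with hypothetical mmap_read_lock()/mmap_read_unlock() stubs and a stand-in predicate for the MTE + VM_SHARED check (names are illustrative, not the real KVM code):

#include <stdio.h>

#define EINVAL 22

static int locked;

static void mmap_read_lock(void)   { locked = 1; }
static void mmap_read_unlock(void) { locked = 0; }

/* Hypothetical stand-in for the MTE + VM_SHARED check on each VMA. */
static int vma_is_invalid(int vma) { return vma == 1; }

static int prepare_region(int nr_vmas)
{
	int ret = 0;

	mmap_read_lock();
	for (int vma = 0; vma < nr_vmas; vma++) {
		if (vma_is_invalid(vma)) {
			ret = -EINVAL;	/* was: return -EINVAL, leaking the lock */
			break;
		}
		/* ... VM_PFNMAP handling etc. ... */
	}
	mmap_read_unlock();

	return ret;
}

int main(void)
{
	/* Prints ret=-22 still_locked=0: the error path drops the lock. */
	printf("ret=%d still_locked=%d\n", prepare_region(2), locked);
	return 0;
}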
arch/x86/kvm/svm/sev.c (+1 −1)

@@ -2583,7 +2583,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 		return -EINVAL;

 	return kvm_sev_es_string_io(&svm->vcpu, size, port,
-				    svm->ghcb_sa, svm->ghcb_sa_len, in);
+				    svm->ghcb_sa, svm->ghcb_sa_len / size, in);
 }

 void sev_es_init_vmcb(struct vcpu_svm *svm)
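kvm_sev_es_string_io() expects the number of repetitions (port-I/O elements of 'size' bytes each), while ghcb_sa_len counts bytes in the GHCB scratch area; passing the raw byte length made the emulated string I/O run size-times too many iterations and overrun the buffer. A worked example of the unit fix (values are illustrative, not the real KVM structures):

#include <stdio.h>

int main(void)
{
	unsigned int ghcb_sa_len = 64;	/* bytes in the scratch area */
	int size = 4;			/* 4-byte (INSD/OUTSD) accesses */

	unsigned int wrong_count = ghcb_sa_len;		/* 64 reps: reads 256 bytes */
	unsigned int count = ghcb_sa_len / size;	/* 16 reps: exactly 64 bytes */

	printf("wrong=%u fixed=%u\n", wrong_count, count);
	return 0;
}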