arch/s390/kernel/uv.c (+11 −21)

@@ -192,21 +192,10 @@ static int expected_page_refs(struct page *page)
 	return res;
 }
 
-static int make_secure_pte(pte_t *ptep, unsigned long addr,
-			   struct page *exp_page, struct uv_cb_header *uvcb)
+static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
 {
-	pte_t entry = READ_ONCE(*ptep);
-	struct page *page;
 	int expected, cc = 0;
 
-	if (!pte_present(entry))
-		return -ENXIO;
-	if (pte_val(entry) & _PAGE_INVALID)
-		return -ENXIO;
-
-	page = pte_page(entry);
-	if (page != exp_page)
-		return -ENXIO;
 	if (PageWriteback(page))
 		return -EAGAIN;
 	expected = expected_page_refs(page);

@@ -304,17 +293,18 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 		goto out;
 
 	rc = -ENXIO;
-	page = follow_page(vma, uaddr, FOLL_WRITE);
-	if (IS_ERR_OR_NULL(page))
-		goto out;
-
-	lock_page(page);
 	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
-	if (should_export_before_import(uvcb, gmap->mm))
-		uv_convert_from_secure(page_to_phys(page));
-	rc = make_secure_pte(ptep, uaddr, page, uvcb);
+	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
+		page = pte_page(*ptep);
+		rc = -EAGAIN;
+		if (trylock_page(page)) {
+			if (should_export_before_import(uvcb, gmap->mm))
+				uv_convert_from_secure(page_to_phys(page));
+			rc = make_page_secure(page, uvcb);
+			unlock_page(page);
+		}
+	}
 	pte_unmap_unlock(ptep, ptelock);
-	unlock_page(page);
 out:
 	mmap_read_unlock(gmap->mm);
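The rewritten gmap_make_secure() path above takes the PTE lock first, derives the page from a present, valid, writable PTE, and then only try-locks that page, returning -EAGAIN so the caller retries instead of sleeping on lock_page() while the mapping may change underneath. The user-space sketch below is only an analogy of that lock-ordering/back-off pattern, not kernel code; the names try_make_secure, outer and inner are invented for illustration.

/*
 * Minimal user-space analogy (assumed names, not the kernel API): take the
 * outer lock unconditionally, only try-lock the inner one, and let the
 * caller retry on -EAGAIN instead of blocking with the outer lock held.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the PTE lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the page lock */

/* Do the work only if both locks can be held; otherwise ask the caller to retry. */
static int try_make_secure(void)
{
	int rc = -EAGAIN;

	pthread_mutex_lock(&outer);
	if (pthread_mutex_trylock(&inner) == 0) {
		/* both locks held: safe to operate on the object */
		rc = 0;
		pthread_mutex_unlock(&inner);
	}
	pthread_mutex_unlock(&outer);
	return rc;
}

int main(void)
{
	int rc;

	/* the caller retries until the inner lock could be taken */
	while ((rc = try_make_secure()) == -EAGAIN)
		;
	printf("done: %d\n", rc);
	return 0;
}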
arch/s390/kvm/pv.c (+5 −0)

@@ -314,6 +314,11 @@ int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
 	 */
 	if (kvm->arch.pv.set_aside)
 		return -EINVAL;
 
+	/* Guest with segment type ASCE, refuse to destroy asynchronously */
+	if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
+		return -EINVAL;
+
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
arch/s390/mm/gmap.c (+7 −0)

@@ -2840,6 +2840,9 @@ EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
  * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
  * @gmap: the gmap whose ASCE needs to be replaced
  *
+ * If the ASCE is a SEGMENT type then this function will return -EINVAL,
+ * otherwise the pointers in the host_to_guest radix tree will keep pointing
+ * to the wrong pages, causing use-after-free and memory corruption.
  * If the allocation of the new top level page table fails, the ASCE is not
  * replaced.
  * In any case, the old ASCE is always removed from the gmap CRST list.

@@ -2854,6 +2857,10 @@ int s390_replace_asce(struct gmap *gmap)
 
 	s390_unlist_old_asce(gmap);
 
+	/* Replacing segment type ASCEs would cause serious issues */
+	if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
+		return -EINVAL;
+
 	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
 	if (!page)
 		return -ENOMEM;
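Both the kvm_s390_pv_set_aside() and s390_replace_asce() hunks rely on the same guard: the designation-type bits of the ASCE encode how many translation levels the guest address space uses, and a segment-type ASCE is refused. The stand-alone snippet below only illustrates that mask test; the constant values are assumptions mirroring what arch/s390/include/asm/pgtable.h defines, and the sample ASCE values are hypothetical.

/*
 * Illustration of the ASCE designation-type check used in the hunks above.
 * The constants below are reproduced here only for the example and should be
 * treated as assumptions; the authoritative definitions live in
 * arch/s390/include/asm/pgtable.h.
 */
#include <stdint.h>
#include <stdio.h>

#define _ASCE_TYPE_MASK		0x0cUL	/* designation-type bits of an ASCE (assumed value) */
#define _ASCE_TYPE_SEGMENT	0x00UL	/* segment table only: smallest address space (assumed value) */
#define _ASCE_TYPE_REGION1	0x0cUL	/* region-first table: largest address space (assumed value) */

/* Return nonzero when replacing or tearing down this ASCE must be refused. */
static int asce_is_segment_type(uint64_t asce)
{
	return (asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT;
}

int main(void)
{
	/* hypothetical table origins, only the type bits matter for the check */
	uint64_t segment_asce = 0x12345000UL | _ASCE_TYPE_SEGMENT;
	uint64_t region1_asce = 0x67890000UL | _ASCE_TYPE_REGION1;

	printf("segment-type asce refused: %d\n", asce_is_segment_type(segment_asce));	/* 1 -> -EINVAL path */
	printf("region-type asce refused:  %d\n", asce_is_segment_type(region1_asce));	/* 0 -> proceed */
	return 0;
}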