Commit 9c6eb531 authored by Paolo Bonzini's avatar Paolo Bonzini
Browse files

Merge tag 'kvm-s390-next-5.16-1' of...

Merge tag 'kvm-s390-next-5.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fixes and Features for 5.16

- SIGP Fixes
- initial preparations for lazy destroy of secure VMs
- storage key improvements/fixes
- Log the guest CPNC
parents 7c8de080 3fd8417f
Loading
Loading
Loading
Loading
+6 −3
Original line number Diff line number Diff line
@@ -1074,8 +1074,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

@@ -1091,8 +1092,9 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

@@ -1116,8 +1118,9 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

+13 −2
Original line number Diff line number Diff line
@@ -18,6 +18,11 @@
#include <asm/page.h>
#include <asm/gmap.h>

#define UVC_CC_OK	0
#define UVC_CC_ERROR	1
#define UVC_CC_BUSY	2
#define UVC_CC_PARTIAL	3

#define UVC_RC_EXECUTED		0x0001
#define UVC_RC_INV_CMD		0x0002
#define UVC_RC_INV_STATE	0x0003
@@ -351,8 +356,9 @@ static inline int is_prot_virt_host(void)
}

int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int uv_destroy_page(unsigned long paddr);
int uv_destroy_owned_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
int uv_convert_owned_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);
@@ -360,7 +366,7 @@ void setup_uv(void);
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}

static inline int uv_destroy_page(unsigned long paddr)
static inline int uv_destroy_owned_page(unsigned long paddr)
{
	return 0;
}
@@ -369,6 +375,11 @@ static inline int uv_convert_from_secure(unsigned long paddr)
{
	return 0;
}

static inline int uv_convert_owned_from_secure(unsigned long paddr)
{
	return 0;
}
#endif

#endif /* _ASM_S390_UV_H */
+57 −8
Original line number Diff line number Diff line
@@ -100,7 +100,7 @@ static int uv_pin_shared(unsigned long paddr)
 *
 * @paddr: Absolute host address of page to be destroyed
 */
int uv_destroy_page(unsigned long paddr)
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
@@ -120,6 +120,22 @@ int uv_destroy_page(unsigned long paddr)
	return 0;
}

/*
 * uv_destroy_owned_page() - destroy the secure (Ultravisor) state of a page.
 * @paddr: absolute host address of the page to be destroyed
 *
 * The caller must already hold a reference to the page.
 *
 * An extra temporary reference is taken so the struct page cannot go away
 * while the Ultravisor call is in flight; on success the PG_arch_1 flag is
 * cleared (NOTE(review): PG_arch_1 appears to track the page's secure state
 * here — it is set in make_secure_pte() and cleared once the page is no
 * longer secure; confirm against the s390 arch documentation).
 *
 * Return: 0 on success, otherwise the error code from uv_destroy_page().
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	/* pin the struct page across the UV call */
	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		/* page is no longer secure: drop the marker */
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
@@ -139,6 +155,22 @@ int uv_convert_from_secure(unsigned long paddr)
	return 0;
}

/*
 * uv_convert_owned_from_secure() - export a secure page back to the host.
 * @paddr: absolute host address of the page to be exported
 *
 * The caller must already hold a reference to the page.
 *
 * An extra temporary reference is taken so the struct page cannot go away
 * while the Ultravisor call is in flight; on success the PG_arch_1 flag is
 * cleared (NOTE(review): PG_arch_1 appears to track the page's secure state
 * here — it is set in make_secure_pte() and cleared once the page is no
 * longer secure; confirm against the s390 arch documentation).
 *
 * Return: 0 on success, otherwise the error code from
 * uv_convert_from_secure().
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	/* pin the struct page across the UV call */
	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		/* page is no longer secure: drop the marker */
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
@@ -165,7 +197,7 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr,
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;
	int expected, cc = 0;

	if (!pte_present(entry))
		return -ENXIO;
@@ -181,12 +213,25 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr,
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
	/*
	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/*
@@ -212,7 +257,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
@@ -239,6 +284,10 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
+5 −0
Original line number Diff line number Diff line
@@ -518,6 +518,11 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu)
	 */
	if (rc == -EINVAL)
		return 0;
	/*
	 * If we got -EAGAIN here, we simply return it. It will eventually
	 * get propagated all the way to userspace, which should then try
	 * again.
	 */
	return rc;
}

+3 −2
Original line number Diff line number Diff line
@@ -3053,13 +3053,14 @@ static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
	int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_vcpu *vcpu;
	u8 vcpu_isc_mask;

	for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (psw_ioint_disabled(vcpu))
			continue;
		deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
		if (deliverable_mask) {
		vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
		if (deliverable_mask & vcpu_isc_mask) {
			/* lately kicked but not yet running */
			if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
				return;
Loading