Commit 07fbdf7f authored by Claudio Imbrenda, committed by Janosch Frank
Browse files

KVM: s390: pv: usage counter instead of flag



Use the new protected_count field as a counter instead of the old
is_protected flag. This will be used in upcoming patches.

Increment the counter when a secure configuration is created, and
decrement it when it is destroyed. Previously the flag was set when the
set secure parameters UVC was performed.

Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Acked-by: Janosch Frank <frankja@linux.ibm.com>
Link: https://lore.kernel.org/r/20220628135619.32410-6-imbrenda@linux.ibm.com


Message-Id: <20220628135619.32410-6-imbrenda@linux.ibm.com>
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
parent 6f73517d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -18,7 +18,7 @@ typedef struct {
	unsigned long asce_limit;
	unsigned long vdso_base;
	/* The mmu context belongs to a secure guest. */
	atomic_t is_protected;
	atomic_t protected_count;
	/*
	 * The following bitfields need a down_write on the mm
	 * semaphore when they are written to. As they are only
+1 −1
Original line number Diff line number Diff line
@@ -26,7 +26,7 @@ static inline int init_new_context(struct task_struct *tsk,
	INIT_LIST_HEAD(&mm->context.gmap_list);
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.flush_count, 0);
	atomic_set(&mm->context.is_protected, 0);
	atomic_set(&mm->context.protected_count, 0);
	mm->context.gmap_asce = 0;
	mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
+1 −1
Original line number Diff line number Diff line
@@ -525,7 +525,7 @@ static inline int mm_has_pgste(struct mm_struct *mm)
static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
	if (unlikely(atomic_read(&mm->context.protected_count)))
		return 1;
#endif
	return 0;
+7 −5
Original line number Diff line number Diff line
@@ -167,7 +167,8 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	atomic_set(&kvm->mm->context.is_protected, 0);
	if (!cc)
		atomic_dec(&kvm->mm->context.protected_count);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
	/* Intended memory leak on "impossible" error */
@@ -209,11 +210,14 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	atomic_inc(&kvm->mm->context.protected_count);
	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		else
		} else {
			atomic_dec(&kvm->mm->context.protected_count);
			kvm_s390_pv_dealloc_vm(kvm);
		}
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
@@ -236,8 +240,6 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	if (!cc)
		atomic_set(&kvm->mm->context.is_protected, 1);
	return cc ? -EINVAL : 0;
}