Commit 730c627d authored by Bibo Mao's avatar Bibo Mao Committed by openeuler-sync-bot
Browse files

loongarch/kvm: Remove SW timer switch when vcpu is halt polling

LoongArch inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8I8NK



------------------------------------------

This patch removes the SW timer switch during the vcpu block stage. The VM
uses the HW timer rather than a SW PV timer on LoongArch systems, so it can
check pending HW timer interrupt status directly, rather than switching to
the SW timer and checking for an injected SW timer interrupt.

When the SW timer is not used in vcpu halt-polling mode, the related
SW timer handling before entering the guest can be removed as well. Timer
emulation is simpler than before; SW timer emulation is only used during
vcpu thread context switch.

Signed-off-by: default avatarBibo Mao <maobibo@loongson.cn>
(cherry picked from commit c15e40a7)
parent 42d7d2ee
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -243,6 +243,7 @@ struct kvm_vcpu_arch {
	u64 perf_ctrl[4];
	u64 perf_cntr[4];

	int blocking;
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
@@ -319,8 +320,6 @@ static inline void kvm_arch_free_memslot(struct kvm *kvm,
					struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

extern int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
+2 −13
Original line number Diff line number Diff line
@@ -24,20 +24,9 @@ int _kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_IDLE);
	if (!vcpu->arch.irq_pending) {
		kvm_save_timer(vcpu);
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
	kvm_vcpu_block(vcpu);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

+0 −1
Original line number Diff line number Diff line
@@ -99,7 +99,6 @@ void kvm_restore_lasx_upper(struct kvm_vcpu *cpu);
void kvm_lose_hw_perf(struct kvm_vcpu *vcpu);
void kvm_restore_hw_perf(struct kvm_vcpu *vcpu);

void kvm_acquire_timer(struct kvm_vcpu *vcpu);
void kvm_reset_timer(struct kvm_vcpu *vcpu);
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_restore_timer(struct kvm_vcpu *vcpu);
+17 −33
Original line number Diff line number Diff line
@@ -246,7 +246,6 @@ int kvm_arch_hardware_enable(void)
	 */
	gcfg |= KVM_GCFG_GCI_SECURE;
	gcfg |= KVM_GCFG_MATC_ROOT;
	gcfg |= KVM_GCFG_TIT;
	kvm_write_csr_gcfg(gcfg);
	kvm_flush_tlb_all();

@@ -457,9 +456,6 @@ static int _kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)

	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_acquire_timer(vcpu);

	if (!(ret & RESUME_HOST)) {
		_kvm_deliver_intr(vcpu);
		/* Only check for signals if not already exiting to userspace */
@@ -652,7 +648,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	cpu = smp_processor_id();
	kvm_acquire_timer(vcpu);
	/* Check if we have any exceptions/interrupts pending */
	_kvm_deliver_intr(vcpu);

@@ -698,23 +693,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
	return -ENOIOCTLCMD;
}

/**
 * kvm_migrate_count() - Migrate timer.
 * @vcpu:       Virtual CPU.
 *
 * Migrate hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_migrate_count(struct kvm_vcpu *vcpu)
{
	/*
	 * hrtimer_cancel() returns nonzero only if the timer was queued or
	 * running, so an inactive timer is left alone; hrtimer_restart()
	 * re-queues it with its original expiry so it fires on this CPU.
	 */
	if (hrtimer_cancel(&vcpu->arch.swtimer))
		hrtimer_restart(&vcpu->arch.swtimer);
}

static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_context *context;
@@ -822,22 +800,22 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

	local_irq_save(flags);
	vcpu->cpu = cpu;
	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
				vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_migrate_count(vcpu);
	}

	/* restore guest state to registers */
	_kvm_vcpu_load(vcpu, cpu);
	local_irq_restore(flags);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/*
	 * Flag that this vcpu is entering the block/halt-polling path;
	 * the timer-save path checks arch.blocking to decide whether a
	 * pending timer must wake the halt-polling vcpu.
	 */
	vcpu->arch.blocking = 1;
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	/* Vcpu has left the block/halt-polling path; clear the flag. */
	vcpu->arch.blocking = 0;
}

static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;
@@ -1712,9 +1690,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return _kvm_pending_timer(vcpu) ||
	int ret;

	/* protect from TOD sync and vcpu_load/put */
	preempt_disable();
	ret = _kvm_pending_timer(vcpu) ||
		kvm_read_hw_gcsr(KVM_CSR_ESTAT) &
			(1 << (KVM_INT_TIMER - KVM_INT_START));
	preempt_enable();
	return ret;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+21 −44
Original line number Diff line number Diff line
@@ -97,6 +97,12 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
		return;
	}

	/*
	 * Freeze the soft-timer and sync the guest stable timer with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	hrtimer_cancel(&vcpu->arch.swtimer);

	/*
	 * set remainder tick value if not expired
	 */
@@ -113,8 +119,7 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
			delta = 0;
		/*
		 * inject timer here though sw timer should inject timer
		 * interrupt async already, since sw timer may be cancelled
		 * during injecting intr async in function kvm_acquire_timer
		 * interrupt async already
		 */
		_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
	}
@@ -122,31 +127,6 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
	kvm_write_gcsr_timertick(delta);
}

/*
 * Restore hard timer state and enable guest to access timer registers
 * without trap.
 *
 * It is called with irq disabled.
 */
void kvm_acquire_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg;

	cfg = kvm_read_csr_gcfg();
	/*
	 * GCFG_TIT clear means the guest already has direct (trap-free)
	 * access to the hard timer, so there is nothing to hand over.
	 */
	if (!(cfg & CSR_GCFG_TIT))
		return;

	/* enable guest access to hard timer */
	kvm_write_csr_gcfg(cfg & ~CSR_GCFG_TIT);

	/*
	 * Freeze the soft-timer and sync the guest stable timer with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	hrtimer_cancel(&vcpu->arch.swtimer);
}

/*
 * Save guest timer state and switch to software emulation of guest
 * timer. The hard timer must already be in use, so preemption should be
@@ -168,14 +148,17 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
		 * HRTIMER_MODE_PINNED is suggested since vcpu may run in
		 * the same physical cpu in next time
		 */
		hrtimer_cancel(&vcpu->arch.swtimer);
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
	} else
	} else if (vcpu->arch.blocking) {
		/*
		 * inject timer interrupt so that halt polling can detect
		 * and exit
		 * Inject timer interrupt so that halt polling can detect and exit;
		 * kvm_queue_irq is not enough, hrtimer had better be used since vcpu
		 * is halt-polling and scheduled out already
		 */
		_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
		expire = ktime_add_ns(ktime_get(), 10);  // 10ns is enough here
		vcpu->arch.expire = expire;
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
	}
}

/*
@@ -185,20 +168,14 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;
	unsigned long cfg;

	preempt_disable();
	cfg = kvm_read_csr_gcfg();
	if (!(cfg & CSR_GCFG_TIT)) {
		/* disable guest use of hard timer */
		kvm_write_csr_gcfg(cfg | CSR_GCFG_TIT);

	/* save hard timer state */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
	if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
		_kvm_save_timer(vcpu);
	}

	/* save timer-related state to vCPU context */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);