arch/powerpc/kvm/book3s_hv.c  (+15 −14)

@@ -927,6 +927,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
 
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long req = kvmppc_get_gpr(vcpu, 3);
 	unsigned long target, ret = H_SUCCESS;
 	int yield_count;
@@ -942,7 +943,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		break;
 	case H_PROD:
 		target = kvmppc_get_gpr(vcpu, 4);
-		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+		tvcpu = kvmppc_find_vcpu(kvm, target);
 		if (!tvcpu) {
 			ret = H_PARAMETER;
 			break;
@@ -956,7 +957,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		target = kvmppc_get_gpr(vcpu, 4);
 		if (target == -1)
 			break;
-		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+		tvcpu = kvmppc_find_vcpu(kvm, target);
 		if (!tvcpu) {
 			ret = H_PARAMETER;
 			break;
@@ -972,12 +973,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 					kvmppc_get_gpr(vcpu, 6));
 		break;
 	case H_RTAS:
-		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+		if (list_empty(&kvm->arch.rtas_tokens))
 			return RESUME_HOST;
 
-		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		idx = srcu_read_lock(&kvm->srcu);
 		rc = kvmppc_rtas_hcall(vcpu);
-		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+		srcu_read_unlock(&kvm->srcu, idx);
 
 		if (rc == -ENOENT)
 			return RESUME_HOST;
@@ -1064,12 +1065,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 
 	case H_SET_PARTITION_TABLE:
 		ret = H_FUNCTION;
-		if (nesting_enabled(vcpu->kvm))
+		if (nesting_enabled(kvm))
 			ret = kvmhv_set_partition_table(vcpu);
 		break;
 	case H_ENTER_NESTED:
 		ret = H_FUNCTION;
-		if (!nesting_enabled(vcpu->kvm))
+		if (!nesting_enabled(kvm))
 			break;
 		ret = kvmhv_enter_nested_guest(vcpu);
 		if (ret == H_INTERRUPT) {
@@ -1084,12 +1085,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		break;
 	case H_TLB_INVALIDATE:
 		ret = H_FUNCTION;
-		if (nesting_enabled(vcpu->kvm))
+		if (nesting_enabled(kvm))
 			ret = kvmhv_do_nested_tlbie(vcpu);
 		break;
 	case H_COPY_TOFROM_GUEST:
 		ret = H_FUNCTION;
-		if (nesting_enabled(vcpu->kvm))
+		if (nesting_enabled(kvm))
 			ret = kvmhv_copy_tofrom_guest_nested(vcpu);
 		break;
 	case H_PAGE_INIT:
@@ -1100,7 +1101,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	case H_SVM_PAGE_IN:
 		ret = H_UNSUPPORTED;
 		if (kvmppc_get_srr1(vcpu) & MSR_S)
-			ret = kvmppc_h_svm_page_in(vcpu->kvm,
+			ret = kvmppc_h_svm_page_in(kvm,
 						   kvmppc_get_gpr(vcpu, 4),
 						   kvmppc_get_gpr(vcpu, 5),
 						   kvmppc_get_gpr(vcpu, 6));
@@ -1108,7 +1109,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	case H_SVM_PAGE_OUT:
 		ret = H_UNSUPPORTED;
 		if (kvmppc_get_srr1(vcpu) & MSR_S)
-			ret = kvmppc_h_svm_page_out(vcpu->kvm,
+			ret = kvmppc_h_svm_page_out(kvm,
 						    kvmppc_get_gpr(vcpu, 4),
 						    kvmppc_get_gpr(vcpu, 5),
 						    kvmppc_get_gpr(vcpu, 6));
@@ -1116,12 +1117,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	case H_SVM_INIT_START:
 		ret = H_UNSUPPORTED;
 		if (kvmppc_get_srr1(vcpu) & MSR_S)
-			ret = kvmppc_h_svm_init_start(vcpu->kvm);
+			ret = kvmppc_h_svm_init_start(kvm);
 		break;
 	case H_SVM_INIT_DONE:
 		ret = H_UNSUPPORTED;
 		if (kvmppc_get_srr1(vcpu) & MSR_S)
-			ret = kvmppc_h_svm_init_done(vcpu->kvm);
+			ret = kvmppc_h_svm_init_done(kvm);
 		break;
 	case H_SVM_INIT_ABORT:
 		/*
@@ -1131,7 +1132,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		 * Instead the kvm->arch.secure_guest flag is checked inside
 		 * kvmppc_h_svm_init_abort().
 		 */
-		ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+		ret = kvmppc_h_svm_init_abort(kvm);
 		break;
 
 	default:
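The change above is purely mechanical: kvmppc_pseries_do_hcall() dereferences vcpu->kvm at many call sites, so the patch loads that pointer once into a local kvm at function entry and has every hcall case use the local. Below is a minimal, self-contained sketch of the same pattern outside the kernel; the kvm_like/vcpu_like types and handle_before/handle_after functions are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Toy stand-ins for struct kvm and struct kvm_vcpu. */
struct kvm_like {
	int nesting_enabled;
};

struct vcpu_like {
	struct kvm_like *kvm;
	unsigned long gprs[32];
};

/* Before: every use spells out the full vcpu->kvm chain. */
static int handle_before(struct vcpu_like *vcpu)
{
	if (!vcpu->kvm->nesting_enabled)
		return -1;
	return (int)vcpu->gprs[4];
}

/* After: the pointer is loaded once and every use reads the local. */
static int handle_after(struct vcpu_like *vcpu)
{
	struct kvm_like *kvm = vcpu->kvm;	/* cached once, as the patch does */

	if (!kvm->nesting_enabled)
		return -1;
	return (int)vcpu->gprs[4];
}

int main(void)
{
	struct kvm_like kvm = { .nesting_enabled = 1 };
	struct vcpu_like vcpu = { .kvm = &kvm };

	vcpu.gprs[4] = 41;
	printf("before: %d, after: %d\n",
	       handle_before(&vcpu), handle_after(&vcpu));
	return 0;
}

Both variants behave identically; the refactor only shortens the repeated dereference and makes the long switch statement easier to read.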