From: Dario Faggioli <dfaggioli@suse.com>
Date: Fri, 12 Mar 2021 17:38:53 +0000
Subject: kABI fix after "nVMX: Properly handle userspace interrupt window request"
Patch-mainline: never, kABI fix
References: bsc#1183427
The patch kvm-nvmx-properly-handle-userspace-interrupt-window-request
removes the second argument of a kvm_x86_ops member function, whose
signature is part of the kABI. Fix that by adding the formal argument
back, naming it __unused to indicate that it is ignored (and, of
course, updating the callers accordingly).
Signed-off-by: Dario Faggioli <dfaggioli@suse.com>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/vmx/nested.c | 2 +-
arch/x86/kvm/x86.c | 6 +++---
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ef14f9adc644..740035bfd9ee 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1138,7 +1138,7 @@ struct kvm_x86_ops {
bool (*pt_supported)(void);
bool (*pku_supported)(void);
- int (*check_nested_events)(struct kvm_vcpu *vcpu);
+ int (*check_nested_events)(struct kvm_vcpu *vcpu, bool __unused);
void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 63d6ce668196..ee9a2b7bae1a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3437,7 +3437,7 @@ static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
vcpu->arch.exception.payload);
}
-static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool __unused)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qual;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0753b3e31e90..6f24890d6b6c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7489,7 +7489,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
* from L2 to L1.
*/
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
- r = kvm_x86_ops->check_nested_events(vcpu);
+ r = kvm_x86_ops->check_nested_events(vcpu, /* unused */ 0);
if (r != 0)
return r;
}
@@ -7551,7 +7551,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
* KVM_REQ_EVENT only on certain events and not unconditionally?
*/
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
- r = kvm_x86_ops->check_nested_events(vcpu);
+ r = kvm_x86_ops->check_nested_events(vcpu, /* unused */ 0);
if (r != 0)
return r;
}
@@ -8254,7 +8254,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
- kvm_x86_ops->check_nested_events(vcpu);
+ kvm_x86_ops->check_nested_events(vcpu, /* unused */ 0);
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted);