diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 873e3b6948ffc1cb952e28e4f8b3d435c092a93d..621dd396af6340a4a35c367f97fde1cce1d985e8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3459,8 +3459,6 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	u64 spte = 0ull;
 	uint retry_count = 0;
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		return false;
 
 	if (!page_fault_can_be_fast(error_code))
 		return false;
@@ -3957,9 +3955,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
 	int root, leaf;
 	bool reserved = false;
-
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		goto exit;
+
 	walk_shadow_page_lockless_begin(vcpu);
 
@@ -3990,7 +3986,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 		root--;
 	}
 }
-exit:
+
 	*sptep = spte;
 	return reserved;
 }
@@ -4054,8 +4050,6 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
 	struct kvm_shadow_walk_iterator iterator;
 	u64 spte;
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		return;
 
 	walk_shadow_page_lockless_begin(vcpu);
 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
@@ -5385,6 +5379,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 	enum emulation_result er;
 	bool direct = vcpu->arch.mmu.direct_map;
 
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return RET_PF_RETRY;
+
 	/* With shadow page tables, fault_address contains a GVA or nGPA. */
 	if (vcpu->arch.mmu.direct_map) {
 		vcpu->arch.gpa_available = true;
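
For context, a minimal compilable sketch of the pattern this patch applies: a single root_hpa validity check at the top of the page-fault entry point replaces the per-helper checks, so the lockless walkers can assume a valid root. All names below (struct mmu, page_fault, fast_path) are simplified stand-ins for illustration, not the kernel's actual definitions.

/*
 * Illustrative sketch only, not kernel code: consolidate the validity
 * check at the one entry point instead of repeating it in each helper.
 */
#include <stdbool.h>
#include <stdio.h>

#define INVALID_PAGE	(~0ull)
#define VALID_PAGE(x)	((x) != INVALID_PAGE)

enum { RET_PF_RETRY = 0, RET_PF_EMULATE = 1 };

struct mmu { unsigned long long root_hpa; };

static bool fast_path(struct mmu *mmu)
{
	/* No VALID_PAGE() check needed here any more: the caller
	 * already bailed out if the root was invalid. */
	return false;	/* fast path did not resolve the fault */
}

static int page_fault(struct mmu *mmu)
{
	/* Single consolidated check at the top of the handler. */
	if (!VALID_PAGE(mmu->root_hpa))
		return RET_PF_RETRY;	/* re-enter the guest and retry */

	if (fast_path(mmu))
		return RET_PF_RETRY;
	return RET_PF_EMULATE;
}

int main(void)
{
	struct mmu mmu = { .root_hpa = INVALID_PAGE };

	/* Prints 0 (RET_PF_RETRY): invalid root never reaches the walkers. */
	printf("%d\n", page_fault(&mmu));
	return 0;
}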