[Xen-changelog] [xen-unstable] hvm: Fix up guest_table handling after p2m changes.
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1189191269 -3600
# Node ID a53aaea4c69813a7143daa677b9e65d1d2f15b6b
# Parent  f8e7f06b351c7a526e0187ab701457f8ed65b835
hvm: Fix up guest_table handling after p2m changes.

Fixes a host crash on HVM guest restore.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c         |   24 ++---------
 xen/arch/x86/hvm/svm/svm.c     |   56 ++++++++++++++-------------
 xen/arch/x86/hvm/vmx/vmx.c     |   84 ++++++++++++++++++++---------------------
 xen/arch/x86/mm/shadow/multi.c |   14 ------
 4 files changed, 77 insertions(+), 101 deletions(-)

diff -r f8e7f06b351c -r a53aaea4c698 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Fri Sep 07 19:53:57 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Fri Sep 07 19:54:29 2007 +0100
@@ -586,8 +586,7 @@ int hvm_set_cr0(unsigned long value)
         if ( !paging_mode_hap(v->domain) )
         {
-            put_page(mfn_to_page(get_mfn_from_gpfn(
-                v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
+            put_page(pagetable_get_page(v->arch.guest_table));
             v->arch.guest_table = pagetable_null();
         }
     }
@@ -603,21 +602,11 @@ int hvm_set_cr0(unsigned long value)
 
 int hvm_set_cr3(unsigned long value)
 {
-    unsigned long old_base_mfn, mfn;
+    unsigned long mfn;
     struct vcpu *v = current;
 
-    if ( paging_mode_hap(v->domain) || !hvm_paging_enabled(v) )
-    {
-        /* Nothing to do. */
-    }
-    else if ( value == v->arch.hvm_vcpu.guest_cr[3] )
-    {
-        /* Shadow-mode TLB flush. Invalidate the shadow. */
-        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
-        if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
-            goto bad_cr3;
-    }
-    else
+    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
+         (value != v->arch.hvm_vcpu.guest_cr[3]) )
     {
         /* Shadow-mode CR3 change. Check PDBR and then make a new shadow. */
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
@@ -625,11 +614,8 @@ int hvm_set_cr3(unsigned long value)
         if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
             goto bad_cr3;
 
-        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+        put_page(pagetable_get_page(v->arch.guest_table));
         v->arch.guest_table = pagetable_from_pfn(mfn);
-
-        if ( old_base_mfn )
-            put_page(mfn_to_page(old_base_mfn));
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
     }
diff -r f8e7f06b351c -r a53aaea4c698 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Fri Sep 07 19:53:57 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Fri Sep 07 19:54:29 2007 +0100
@@ -337,8 +337,36 @@ int svm_vmcb_save(struct vcpu *v, struct
 
 int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 {
-    unsigned long mfn, old_base_mfn;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    unsigned long mfn = 0;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    if ( c->pending_valid &&
+         ((c->pending_type == 1) || (c->pending_type > 6) ||
+          (c->pending_reserved != 0)) )
+    {
+        gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32".\n",
+                 c->pending_event);
+        return -EINVAL;
+    }
+
+    if ( !paging_mode_hap(v->domain) )
+    {
+        if ( c->cr0 & X86_CR0_PG )
+        {
+            mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
+            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
+            {
+                gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
+                         c->cr3);
+                return -EINVAL;
+            }
+        }
+
+        if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
+            put_page(pagetable_get_page(v->arch.guest_table));
+
+        v->arch.guest_table = pagetable_from_pfn(mfn);
+    }
 
     vmcb->rip = c->rip;
     vmcb->rsp = c->rsp;
@@ -357,18 +385,6 @@ int svm_vmcb_restore(struct vcpu *v, str
                __func__, c->cr3, c->cr0, c->cr4);
 #endif
 
-    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
-    {
-        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
-        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
-        if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
-            goto bad_cr3;
-        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = pagetable_from_pfn(mfn);
-        if ( old_base_mfn )
-            put_page(mfn_to_page(old_base_mfn));
-    }
-
     vmcb->idtr.limit = c->idtr_limit;
     vmcb->idtr.base = c->idtr_base;
@@ -435,14 +451,6 @@ int svm_vmcb_restore(struct vcpu *v, str
         gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
                  c->pending_event, c->error_code);
 
-        if ( (c->pending_type == 1) || (c->pending_type > 6) ||
-             (c->pending_reserved != 0) )
-        {
-            gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32"\n",
-                     c->pending_event);
-            return -EINVAL;
-        }
-
         if ( hvm_event_needs_reinjection(c->pending_type, c->pending_vector) )
         {
             vmcb->eventinj.bytes = c->pending_event;
@@ -453,10 +461,6 @@ int svm_vmcb_restore(struct vcpu *v, str
     paging_update_paging_modes(v);
 
     return 0;
-
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", c->cr3);
-    return -EINVAL;
 }
diff -r f8e7f06b351c -r a53aaea4c698 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Sep 07 19:53:57 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Sep 07 19:54:29 2007 +0100
@@ -565,7 +565,31 @@ void vmx_vmcs_save(struct vcpu *v, struc
 
 int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 {
-    unsigned long mfn, old_base_mfn;
+    unsigned long mfn = 0;
+
+    if ( c->pending_valid &&
+         ((c->pending_type == 1) || (c->pending_type > 6) ||
+          (c->pending_reserved != 0)) )
+    {
+        gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32".\n",
+                 c->pending_event);
+        return -EINVAL;
+    }
+
+    if ( c->cr0 & X86_CR0_PG )
+    {
+        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
+        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
+        {
+            gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", c->cr3);
+            return -EINVAL;
+        }
+    }
+
+    if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
+        put_page(pagetable_get_page(v->arch.guest_table));
+
+    v->arch.guest_table = pagetable_from_pfn(mfn);
 
     vmx_vmcs_enter(v);
@@ -586,18 +610,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
                __func__, c->cr3, c->cr0, c->cr4);
 #endif
 
-    if ( hvm_paging_enabled(v) )
-    {
-        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
-        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
-        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
-            goto bad_cr3;
-        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = pagetable_from_pfn(mfn);
-        if ( old_base_mfn )
-            put_page(mfn_to_page(old_base_mfn));
-    }
-
     v->arch.hvm_vcpu.guest_efer = c->msr_efer;
     vmx_update_guest_efer(v);
@@ -661,14 +673,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
     {
         gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
                  c->pending_event, c->error_code);
-
-        if ( (c->pending_type == 1) || (c->pending_type > 6) ||
-             (c->pending_reserved != 0) )
-        {
-            gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32".\n",
-                     c->pending_event);
-            return -EINVAL;
-        }
 
         if ( hvm_event_needs_reinjection(c->pending_type, c->pending_vector) )
         {
@@ -680,11 +684,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
     }
 
     return 0;
-
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", c->cr3);
-    vmx_vmcs_exit(v);
-    return -EINVAL;
 }
 
 #if defined(__x86_64__) && defined(HVM_DEBUG_SUSPEND)
@@ -1905,7 +1904,22 @@ static void vmx_world_save(struct vcpu *
 static int
 vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
 {
-    unsigned long mfn, old_base_mfn;
+    unsigned long mfn = 0;
+
+    if ( c->cr0 & X86_CR0_PG )
+    {
+        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
+        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
+        {
+            gdprintk(XENLOG_ERR, "Invalid CR3 value=%x", c->cr3);
+            return -EINVAL;
+        }
+    }
+
+    if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
+        put_page(pagetable_get_page(v->arch.guest_table));
+
+    v->arch.guest_table = pagetable_from_pfn(mfn);
 
     __vmwrite(GUEST_RIP, c->eip);
     __vmwrite(GUEST_RSP, c->esp);
@@ -1917,18 +1931,6 @@ static int vmx_world_restore(struct vcpu
     vmx_update_guest_cr(v, 0);
     vmx_update_guest_cr(v, 4);
 
-    if ( hvm_paging_enabled(v) )
-    {
-        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %x", c->cr3);
-        mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
-        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
-            goto bad_cr3;
-        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = pagetable_from_pfn(mfn);
-        if ( old_base_mfn )
-            put_page(mfn_to_page(old_base_mfn));
-    }
-
     __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
     __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -1977,10 +1979,6 @@ static int vmx_world_restore(struct vcpu
     paging_update_paging_modes(v);
 
     return 0;
-
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3 value=%x", c->cr3);
-    return -EINVAL;
 }
 
 enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
diff -r f8e7f06b351c -r a53aaea4c698 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c	Fri Sep 07 19:53:57 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Fri Sep 07 19:54:29 2007 +0100
@@ -3502,24 +3502,12 @@ sh_update_cr3(struct vcpu *v, int do_loc
     /* Double-check that the HVM code has sent us a sane guest_table */
     if ( is_hvm_domain(d) )
    {
-        gfn_t gfn;
-
         ASSERT(shadow_mode_external(d));
-
-        // Is paging enabled on this vcpu?
         if ( hvm_paging_enabled(v) )
-        {
-            gfn = _gfn(paddr_to_pfn(v->arch.hvm_vcpu.guest_cr[3]));
-            gmfn = gfn_to_mfn(d, gfn);
-            ASSERT(mfn_valid(gmfn));
-            ASSERT(pagetable_get_pfn(v->arch.guest_table) == mfn_x(gmfn));
-        }
+            ASSERT(pagetable_get_pfn(v->arch.guest_table));
         else
-        {
-            /* Paging disabled: guest_table points at a 32-bit 1-to-1 map */
             ASSERT(v->arch.guest_table.pfn
                    == d->arch.paging.shadow.unpaged_pagetable.pfn);
-        }
     }
 #endif
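The recurring change above is the order of the page-reference swap: every path that installs a new guest_table now validates and references the new top-level frame first, and only then drops whatever reference guest_table was already holding via put_page(pagetable_get_page(v->arch.guest_table)), rather than re-deriving the old reference from guest_cr[3] through the p2m. The standalone C sketch below illustrates that "reference new, release old, install" pattern; the types and helpers (struct page, struct vcpu_state, set_guest_table) are hypothetical toy stand-ins, not Xen code.

/*
 * Minimal standalone sketch (hypothetical types, not Xen code) of the
 * reference-counting order the restore paths now follow:
 *   1. validate the new root frame and take a reference on it,
 *   2. drop the reference held on the old guest_table (if any),
 *   3. install the new table.
 * Failing validation leaves the old state untouched.
 */
#include <stdio.h>

struct page { int refcount; };

struct vcpu_state {
    struct page *guest_table;   /* currently referenced top-level table */
    int paging_enabled;
};

static int get_page(struct page *pg)  { if (!pg) return 0; pg->refcount++; return 1; }
static void put_page(struct page *pg) { if (pg) pg->refcount--; }

/* Returns 0 on success, -1 if the new table is invalid (old state kept). */
static int set_guest_table(struct vcpu_state *v, struct page *new_table,
                           int new_paging_enabled)
{
    /* Validate and reference the new table first. */
    if (new_paging_enabled && !get_page(new_table))
        return -1;

    /* Only now release the reference we were holding on the old table. */
    if (v->paging_enabled)
        put_page(v->guest_table);

    v->guest_table = new_paging_enabled ? new_table : NULL;
    v->paging_enabled = new_paging_enabled;
    return 0;
}

int main(void)
{
    struct page old_root = { .refcount = 1 }, new_root = { .refcount = 0 };
    struct vcpu_state v = { .guest_table = &old_root, .paging_enabled = 1 };

    set_guest_table(&v, &new_root, 1);
    printf("old refcount=%d new refcount=%d\n",
           old_root.refcount, new_root.refcount);   /* prints 0 and 1 */
    return 0;
}

Acquiring the new reference before releasing the old one means a bad CR3 in a restore image fails cleanly with -EINVAL while the vcpu's existing state stays consistent, which matches the up-front validation added to the SVM and VMX restore paths in this changeset.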