[Xen-devel] [PATCH v14 06/17] pvh: Disable unneeded features of HVM containers
Things kept:
 * cacheattr_region lists
 * irq-related structures
 * paging
 * tm_list
 * hvm params

Things disabled for now:
 * compat xlation

Things disabled:
 * Emulated timers and clock sources
 * IO/MMIO io requests
 * msix tables
 * hvm_funcs
 * nested HVM
 * Fast-path for emulated lapic accesses

Getting rid of the hvm_params struct required a couple other places to
check for its existence before attempting to read the params.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
v14:
 - Also free the params struct for pvh domains, since we've allocated it
 - Fail io for pvh VMs further down the stack, as we will be using the
   emulation code before calling into the pv pio handlers
v13:
 - Removed unnecessary comment
 - Allocate params for pvh domains; remove null checks necessary in last patch
 - Add ASSERT(!is_pvh) to handle_pio
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/hvm/emulate.c  | 11 +++++++++-
 xen/arch/x86/hvm/hvm.c      | 50 +++++++++++++++++++++++++++++++++++++------
 xen/arch/x86/hvm/irq.c      |  3 +++
 xen/arch/x86/hvm/vmx/intr.c |  3 ++-
 4 files changed, 58 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f39c173..a41eaa1 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -57,12 +57,21 @@ static int hvmemul_do_io(
     int value_is_ptr = (p_data == NULL);
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio;
-    ioreq_t *p = get_ioreq(curr);
+    ioreq_t *p;
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
     struct page_info *ram_page;
     int rc;
 
+    /* PVH doesn't have an ioreq infrastructure */
+    if ( is_pvh_vcpu(curr) )
+    {
+        gdprintk(XENLOG_WARNING, "Unexpected io from PVH guest\n");
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    p = get_ioreq(curr);
+
     /* Check for paged out page */
     ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
     if ( p2m_is_paging(p2mt) )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 87a6f42..72ca936 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -301,6 +301,10 @@ u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
 
 void hvm_migrate_timers(struct vcpu *v)
 {
+    /* PVH doesn't use rtc and emulated timers, it uses pvclock mechanism. */
+    if ( is_pvh_vcpu(v) )
+        return;
+
     rtc_migrate_timers(v);
     pt_migrate(v);
 }
@@ -342,10 +346,13 @@ void hvm_do_resume(struct vcpu *v)
 {
     ioreq_t *p;
 
-    pt_restore_timer(v);
-
     check_wakeup_from_wait();
 
+    if ( is_pvh_vcpu(v) )
+        goto check_inject_trap;
+
+    pt_restore_timer(v);
+
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
     p = get_ioreq(v);
     while ( p->state != STATE_IOREQ_NONE )
@@ -368,6 +375,7 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+ check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
     {
@@ -528,10 +536,16 @@ int hvm_domain_initialise(struct domain *d)
     if ( rc != 0 )
         goto fail0;
 
+    rc = -ENOMEM;
     d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
+    if ( !d->arch.hvm_domain.params )
+        goto fail1;
+
+    if ( is_pvh_domain(d) )
+        return 0;
+
     d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
-    rc = -ENOMEM;
-    if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
+    if ( !d->arch.hvm_domain.io_handler )
         goto fail1;
     d->arch.hvm_domain.io_handler->num_slot = 0;
 
@@ -578,6 +592,11 @@ int hvm_domain_initialise(struct domain *d)
 
 void hvm_domain_relinquish_resources(struct domain *d)
 {
+    xfree(d->arch.hvm_domain.params);
+
+    if ( is_pvh_domain(d) )
+        return;
+
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         hvm_funcs.nhvm_domain_relinquish_resources(d);
 
@@ -596,12 +615,15 @@ void hvm_domain_relinquish_resources(struct domain *d)
     }
 
     xfree(d->arch.hvm_domain.io_handler);
-    xfree(d->arch.hvm_domain.params);
 }
 
 void hvm_domain_destroy(struct domain *d)
 {
     hvm_destroy_cacheattr_region_list(d);
+
+    if ( is_pvh_domain(d) )
+        return;
+
     hvm_funcs.domain_destroy(d);
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -1103,7 +1125,9 @@ int hvm_vcpu_initialise(struct vcpu *v)
         goto fail1;
 
     /* NB: vlapic_init must be called before hvm_funcs.vcpu_initialise */
-    if ( (rc = vlapic_init(v)) != 0 ) /* teardown: vlapic_destroy */
+    if ( is_hvm_vcpu(v) )
+        rc = vlapic_init(v);
+    if ( rc != 0 ) /* teardown: vlapic_destroy */
         goto fail2;
 
     if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
@@ -1118,6 +1142,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     v->arch.hvm_vcpu.inject_trap.vector = -1;
 
+    if ( is_pvh_vcpu(v) )
+    {
+        v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
+        /* This for hvm_long_mode_enabled(v). */
+        v->arch.hvm_vcpu.guest_efer = EFER_SCE | EFER_LMA | EFER_LME;
+        return 0;
+    }
+
     rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
     if ( rc != 0 )
         goto fail4;
@@ -1189,7 +1221,10 @@ void hvm_vcpu_destroy(struct vcpu *v)
 
     tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
     hvm_vcpu_cacheattr_destroy(v);
-    vlapic_destroy(v);
+
+    if ( is_hvm_vcpu(v) )
+        vlapic_destroy(v);
+
     hvm_funcs.vcpu_destroy(v);
 
     /* Event channel is already freed by evtchn_destroy(). */
@@ -1390,6 +1425,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
     /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
      * a fast path for LAPIC accesses, skipping the p2m lookup. */
     if ( !nestedhvm_vcpu_in_guestmode(v)
+         && is_hvm_vcpu(v)
         && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
     {
         if ( !handle_mmio() )
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 6a6fb68..677fbcd 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -405,6 +405,9 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
          && vcpu_info(v, evtchn_upcall_pending) )
         return hvm_intack_vector(plat->irq.callback_via.vector);
 
+    if ( is_pvh_vcpu(v) )
+        return hvm_intack_none;
+
     if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
         return hvm_intack_pic(0);
 
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 1942e31..7757910 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -236,7 +236,8 @@ void vmx_intr_assist(void)
     }
 
     /* Crank the handle on interrupt state. */
-    pt_vector = pt_update_irq(v);
+    if ( is_hvm_vcpu(v) )
+        pt_vector = pt_update_irq(v);
 
     do {
         unsigned long intr_info;
-- 
1.7.9.5
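
For readers following the PVH series, the recurring idiom in the hunks above is an early return when the vcpu or domain is a PVH container, so that HVM-only emulation paths (ioreqs, emulated timers, vlapic, and so on) are simply never entered. The following is a minimal standalone sketch of that gating pattern only; it is not part of the patch, and struct vcpu, is_pvh_vcpu(), rtc_migrate_timers() and pt_migrate() here are simplified stand-ins for Xen's real definitions, modeled loosely on the hvm_migrate_timers() hunk.

/* Illustrative sketch only -- simplified stand-ins, not Xen's real types. */
#include <stdbool.h>
#include <stdio.h>

enum guest_type { guest_type_hvm, guest_type_pvh };

struct vcpu {
    enum guest_type guest_type;
    int vcpu_id;
};

static bool is_pvh_vcpu(const struct vcpu *v)
{
    return v->guest_type == guest_type_pvh;
}

/* Stand-ins for the emulated-timer machinery a PVH guest never uses. */
static void rtc_migrate_timers(struct vcpu *v)
{
    printf("vcpu%d: emulated RTC migrated\n", v->vcpu_id);
}

static void pt_migrate(struct vcpu *v)
{
    printf("vcpu%d: periodic timers migrated\n", v->vcpu_id);
}

static void hvm_migrate_timers(struct vcpu *v)
{
    /* PVH relies on the pvclock mechanism, so skip emulated timers. */
    if ( is_pvh_vcpu(v) )
        return;

    rtc_migrate_timers(v);
    pt_migrate(v);
}

int main(void)
{
    struct vcpu hvm = { guest_type_hvm, 0 };
    struct vcpu pvh = { guest_type_pvh, 1 };

    hvm_migrate_timers(&hvm);   /* runs the emulated-timer path */
    hvm_migrate_timers(&pvh);   /* returns immediately */
    return 0;
}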