
[Xen-devel] [PATCH v15 07/19] pvh: Disable unneeded features of HVM containers



Things kept:
* cacheattr_region lists
* irq-related structures
* paging
* tm_list
* hvm params
* hvm_domain.io_handler (for handling PV io)

Things disabled for now:
* compat arg translation (setup_compat_arg_xlat)

Things disabled:
* Emulated timers and clock sources
* IO/MMIO ioreq pages, event channels
* MSI-X tables
* hvm_funcs
* nested HVM
* Fast path for emulated LAPIC accesses
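
The pattern is the same throughout the patch: PVH reuses the HVM code paths,
but each facility that only makes sense for a full HVM guest is gated behind
an is_pvh_*/is_hvm_* check with an early return before any emulated-device
work. Below is a minimal standalone sketch of that gating pattern; the
types, the demo_domain_initialise function and the printed strings are
stand-ins invented for illustration, not Xen's actual declarations.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for illustration only; Xen's real domain/vcpu types and
 * is_pvh_domain()/is_hvm_domain() predicates differ. */
enum guest_type { guest_type_pv, guest_type_hvm, guest_type_pvh };

struct domain { enum guest_type guest_type; };

static bool is_pvh_domain(const struct domain *d)
{
    return d->guest_type == guest_type_pvh;
}

static bool is_hvm_domain(const struct domain *d)
{
    return d->guest_type == guest_type_hvm;
}

/* Gating pattern used by the patch: setup shared by HVM and PVH runs
 * first, then PVH returns early before any emulated-device setup. */
static int demo_domain_initialise(struct domain *d)
{
    printf("shared setup: cacheattr regions, params, io_handler\n");

    if ( is_pvh_domain(d) )
        return 0;                 /* PVH: skip emulated devices */

    printf("HVM-only setup: RTC, HPET, PMTIMER, stdvga, vlapic\n");
    return 0;
}

int main(void)
{
    struct domain pvh = { .guest_type = guest_type_pvh };
    struct domain hvm = { .guest_type = guest_type_hvm };

    demo_domain_initialise(&pvh);
    demo_domain_initialise(&hvm);
    return is_hvm_domain(&hvm) ? 0 : 1;
}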

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
v15:
 - Fix typo in comment
 - Allocate io_handler (used later when using HVM io path for PV io)
 - Port hvm_do_resume to "hvm no ioreq" patch
v14:
 - Also free the params struct for pvh domains, since we've allocated it
 - Fail io for pvh VMs further down the stack, as we will be using the emulation
   code before calling into the pv pio handlers
v13:
 - Removed unnecessary comment
 - Allocate params for pvh domains; remove the null checks that were needed in the previous patch
 - Add ASSERT(!is_pvh) to handle_pio
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Eddie Dong <eddie.dong@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c      |   43 ++++++++++++++++++++++++++++++++++++-------
 xen/arch/x86/hvm/irq.c      |    3 +++
 xen/arch/x86/hvm/vmx/intr.c |    3 ++-
 3 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0a9c922..10ddc34 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -304,6 +304,10 @@ u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
 
 void hvm_migrate_timers(struct vcpu *v)
 {
+    /* PVH doesn't use the RTC or emulated timers; it uses the pvclock mechanism. */
+    if ( is_pvh_vcpu(v) )
+        return;
+
     rtc_migrate_timers(v);
     pt_migrate(v);
 }
@@ -345,10 +349,11 @@ void hvm_do_resume(struct vcpu *v)
 {
     ioreq_t *p;
 
-    pt_restore_timer(v);
-
     check_wakeup_from_wait();
 
+    if ( is_hvm_vcpu(v) )
+        pt_restore_timer(v);
+
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
     if ( !(p = get_ioreq(v)) )
         goto check_inject_trap;
@@ -560,6 +565,9 @@ int hvm_domain_initialise(struct domain *d)
         goto fail1;
     d->arch.hvm_domain.io_handler->num_slot = 0;
 
+    if ( is_pvh_domain(d) )
+        return 0;
+
     hvm_init_guest_time(d);
 
     d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
@@ -600,6 +608,12 @@ int hvm_domain_initialise(struct domain *d)
 
 void hvm_domain_relinquish_resources(struct domain *d)
 {
+    xfree(d->arch.hvm_domain.io_handler);
+    xfree(d->arch.hvm_domain.params);
+
+    if ( is_pvh_domain(d) )
+        return;
+
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         hvm_funcs.nhvm_domain_relinquish_resources(d);
 
@@ -616,14 +630,15 @@ void hvm_domain_relinquish_resources(struct domain *d)
         pmtimer_deinit(d);
         hpet_deinit(d);
     }
-
-    xfree(d->arch.hvm_domain.io_handler);
-    xfree(d->arch.hvm_domain.params);
 }
 
 void hvm_domain_destroy(struct domain *d)
 {
     hvm_destroy_cacheattr_region_list(d);
+
+    if ( is_pvh_domain(d) )
+        return;
+
     hvm_funcs.domain_destroy(d);
     rtc_deinit(d);
     stdvga_deinit(d);
@@ -1125,7 +1140,9 @@ int hvm_vcpu_initialise(struct vcpu *v)
         goto fail1;
 
     /* NB: vlapic_init must be called before hvm_funcs.vcpu_initialise */
-    if ( (rc = vlapic_init(v)) != 0 ) /* teardown: vlapic_destroy */
+    if ( is_hvm_vcpu(v) )
+        rc = vlapic_init(v);
+    if ( rc != 0 ) /* teardown: vlapic_destroy */
         goto fail2;
 
     if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
@@ -1140,6 +1157,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     v->arch.hvm_vcpu.inject_trap.vector = -1;
 
+    if ( is_pvh_vcpu(v) )
+    {
+        v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
+        /* This is for hvm_long_mode_enabled(v). */
+        v->arch.hvm_vcpu.guest_efer = EFER_SCE | EFER_LMA | EFER_LME;
+        return 0;
+    }
+
     rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
     if ( rc != 0 )
         goto fail4;
@@ -1211,7 +1236,10 @@ void hvm_vcpu_destroy(struct vcpu *v)
 
     tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
     hvm_vcpu_cacheattr_destroy(v);
-    vlapic_destroy(v);
+
+    if ( is_hvm_vcpu(v) )
+        vlapic_destroy(v);
+
     hvm_funcs.vcpu_destroy(v);
 
     /* Event channel is already freed by evtchn_destroy(). */
@@ -1414,6 +1442,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
     /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
      * a fast path for LAPIC accesses, skipping the p2m lookup. */
     if ( !nestedhvm_vcpu_in_guestmode(v)
+         && is_hvm_vcpu(v)
          && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
     {
         if ( !handle_mmio() )
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 6a6fb68..677fbcd 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -405,6 +405,9 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
          && vcpu_info(v, evtchn_upcall_pending) )
         return hvm_intack_vector(plat->irq.callback_via.vector);
 
+    if ( is_pvh_vcpu(v) )
+        return hvm_intack_none;
+
     if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
         return hvm_intack_pic(0);
 
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 1942e31..7757910 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -236,7 +236,8 @@ void vmx_intr_assist(void)
     }
 
     /* Crank the handle on interrupt state. */
-    pt_vector = pt_update_irq(v);
+    if ( is_hvm_vcpu(v) )
+        pt_vector = pt_update_irq(v);
 
     do {
         unsigned long intr_info;
-- 
1.7.9.5

