[Xen-changelog] [xen-unstable] HVM cleanups:
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 6a26f49d3b168496025d3c7edf3861d503c3a03b
# Parent  d19b8542865b61ae884f49340ca5dbaddadc2973
HVM cleanups:
1) Make the VMX/SVM time functions common HVM code, since they are identical.
2) Move hvm_send_assist_req() from platform.c to hvm.c.
3) Rename the VMX MSR context-switch functions to make them more readable.
4) Miscellaneous coding-style cleanups.

Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
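
For readers skimming the diff: item 1 folds the per-vendor copies of the
freeze/guest-time helpers into common HVM code, where guest time is simply
the host TSC plus a per-vcpu cached offset.  A minimal user-space sketch of
that arithmetic (not Xen source; mock_vcpu, mock_get_guest_time and
__rdtsc() are illustrative stand-ins for the real vcpu state and rdtscll()):

    #include <stdint.h>
    #include <stdio.h>
    #include <x86intrin.h>            /* __rdtsc(); x86 GCC/Clang only */

    struct mock_vcpu {
        uint64_t cache_tsc_offset;    /* guest_tsc - host_tsc, set once */
    };

    static uint64_t mock_get_guest_time(const struct mock_vcpu *v)
    {
        /* Same calculation as the new common hvm_get_guest_time(). */
        return __rdtsc() + v->cache_tsc_offset;
    }

    int main(void)
    {
        /* Start the guest TSC from zero, as hvm_vcpu_initialise() does. */
        struct mock_vcpu v = { .cache_tsc_offset = (uint64_t)0 - __rdtsc() };
        printf("guest tsc now ~ %llu\n",
               (unsigned long long)mock_get_guest_time(&v));
        return 0;
    }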
---
 xen/arch/x86/hvm/hvm.c             |   56 ++++++++-
 xen/arch/x86/hvm/platform.c        |   26 ----
 xen/arch/x86/hvm/svm/svm.c         |   22 ---
 xen/arch/x86/hvm/vmx/vmx.c         |  215 ++++++++++++++++---------------------
 xen/include/asm-x86/hvm/hvm.h      |    3 
 xen/include/asm-x86/hvm/vmx/vmcs.h |   30 ++---
 xen/include/asm-x86/hvm/vmx/vmx.h  |    4 
 7 files changed, 166 insertions(+), 190 deletions(-)

diff -r d19b8542865b -r 6a26f49d3b16 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Nov 17 10:24:22 2006 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Fri Nov 17 10:29:08 2006 +0000
@@ -74,6 +74,30 @@ void hvm_set_guest_time(struct vcpu *v, 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 }
 
+u64 hvm_get_guest_time(struct vcpu *v)
+{
+    u64    host_tsc;
+
+    rdtscll(host_tsc);
+    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
+}
+
+void hvm_freeze_time(struct vcpu *v)
+{
+    struct periodic_time *pt=&v->domain->arch.hvm_domain.pl_time.periodic_tm;
+
+    if ( pt->enabled && pt->first_injected
+            && (v->vcpu_id == pt->bind_vcpu)
+            && !v->arch.hvm_vcpu.guest_time ) {
+        v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
+        if ( !test_bit(_VCPUF_blocked, &v->vcpu_flags) )
+        {
+            stop_timer(&pt->timer);
+            rtc_freeze(v);
+        }
+    }
+}
+
 void hvm_migrate_timers(struct vcpu *v)
 {
     struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
@@ -203,7 +227,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
                pt_timer_fn, v, v->processor);
     pit_init(v, cpu_khz);
     rtc_init(v, RTC_PORT(0), RTC_IRQ);
-    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS); 
+    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
 
     /* Init guest TSC to start from zero. */
     hvm_set_guest_time(v, 0);
@@ -224,14 +248,6 @@ void pic_irq_request(void *data, int lev
 {
     int *interrupt_request = data;
     *interrupt_request = level;
-}
-
-u64 hvm_get_guest_time(struct vcpu *v)
-{
-    u64    host_tsc;
-    
-    rdtscll(host_tsc);
-    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
 }
 
 int cpu_get_interrupt(struct vcpu *v, int *type)
@@ -282,6 +298,28 @@ static void hvm_vcpu_down(void)
                 d->domain_id);
         domain_shutdown(d, SHUTDOWN_poweroff);
     }
+}
+
+void hvm_send_assist_req(struct vcpu *v)
+{
+    ioreq_t *p;
+
+    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    if ( unlikely(p->state != STATE_IOREQ_NONE) )
+    {
+        /* This indicates a bug in the device model.  Crash the domain. */
+        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
+        domain_crash_synchronous();
+    }
+
+    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
+
+    /*
+     * Following happens /after/ blocking and setting up ioreq contents.
+     * prepare_wait_on_xen_event_channel() is an implicit barrier.
+     */
+    p->state = STATE_IOREQ_READY;
+    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
 }
 
 void hvm_hlt(unsigned long rflags)
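
The moved hvm_send_assist_req() relies on an ordering contract with the
device model: the ioreq contents must be globally visible before the state
flips to STATE_IOREQ_READY, and in Xen prepare_wait_on_xen_event_channel()
supplies that barrier.  The producer side of the contract, as a standalone
sketch using C11 atomics (mock_ioreq and mock_send are illustrative names,
not Xen API):

    #include <stdatomic.h>
    #include <stdint.h>

    enum { STATE_IOREQ_NONE, STATE_IOREQ_READY };

    struct mock_ioreq {
        uint64_t addr, data;          /* request payload */
        _Atomic int state;            /* hand-off flag, NONE -> READY */
    };

    static void mock_send(struct mock_ioreq *p, uint64_t addr, uint64_t data)
    {
        /* Fill the request contents first... */
        p->addr = addr;
        p->data = data;
        /* ...then publish with release semantics, so a consumer that
         * observes READY (with an acquire load) also sees the payload.
         * In Xen, prepare_wait_on_xen_event_channel() is this barrier. */
        atomic_store_explicit(&p->state, STATE_IOREQ_READY,
                              memory_order_release);
    }

    int main(void)
    {
        struct mock_ioreq p = { .state = STATE_IOREQ_NONE };
        mock_send(&p, 0xfed000f0, 0x1);   /* hypothetical MMIO write */
        return 0;
    }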
diff -r d19b8542865b -r 6a26f49d3b16 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Fri Nov 17 10:24:22 2006 +0000
+++ b/xen/arch/x86/hvm/platform.c       Fri Nov 17 10:29:08 2006 +0000
@@ -346,7 +346,7 @@ static int reg_mem(unsigned char size, u
     return DECODE_success;
 }
 
-static int hvm_decode(int realmode, unsigned char *opcode,
+static int mmio_decode(int realmode, unsigned char *opcode,
                       struct hvm_io_op *mmio_op, unsigned char *op_size)
 {
     unsigned char size_reg = 0;
@@ -720,28 +720,6 @@ int inst_copy_from_guest(unsigned char *
     if ( hvm_copy_from_guest_virt(buf, guest_eip, inst_len) )
         return 0;
     return inst_len;
-}
-
-static void hvm_send_assist_req(struct vcpu *v)
-{
-    ioreq_t *p;
-
-    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
-    if ( unlikely(p->state != STATE_IOREQ_NONE) )
-    {
-        /* This indicates a bug in the device model.  Crash the domain. */
-        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
-        domain_crash_synchronous();
-    }
-
-    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
-
-    /*
-     * Following happens /after/ blocking and setting up ioreq contents.
-     * prepare_wait_on_xen_event_channel() is an implicit barrier.
-     */
-    p->state = STATE_IOREQ_READY;
-    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
 }
 
 void send_pio_req(unsigned long port, unsigned long count, int size,
@@ -927,7 +905,7 @@ void handle_mmio(unsigned long gpa)
         domain_crash_synchronous();
     }
 
-    if ( hvm_decode(realmode, inst, mmio_op, &op_size) == DECODE_failure ) {
+    if ( mmio_decode(realmode, inst, mmio_op, &op_size) == DECODE_failure ) {
         printk("handle_mmio: failed to decode instruction\n");
         printk("mmio opcode: gpa 0x%lx, len %d:", gpa, inst_len);
         for ( i = 0; i < inst_len; i++ )
diff -r d19b8542865b -r 6a26f49d3b16 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Fri Nov 17 10:24:22 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Nov 17 10:29:08 2006 +0000
@@ -714,26 +714,9 @@ static void arch_svm_do_launch(struct vc
     reset_stack_and_jump(svm_asm_do_launch);
 }
 
-static void svm_freeze_time(struct vcpu *v)
-{
-    struct periodic_time *pt=&v->domain->arch.hvm_domain.pl_time.periodic_tm;
-
-    if ( pt->enabled && pt->first_injected
-            && (v->vcpu_id == pt->bind_vcpu)
-            && !v->arch.hvm_vcpu.guest_time ) {
-        v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
-        if ( test_bit(_VCPUF_blocked, &v->vcpu_flags) )
-        {
-            stop_timer(&pt->timer);
-            rtc_freeze(v);
-        }
-    }
-}
-
-
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
-    svm_freeze_time(v);
+    hvm_freeze_time(v);
     svm_save_dr(v);
 }
 
@@ -852,7 +835,6 @@ int start_svm(void)
     return 1;
 }
 
-
 void arch_svm_do_resume(struct vcpu *v) 
 {
     /* pinning VCPU to a different core? */
@@ -870,8 +852,6 @@ void arch_svm_do_resume(struct vcpu *v)
         reset_stack_and_jump( svm_asm_do_resume );
     }
 }
-
-
 
 static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
 {
diff -r d19b8542865b -r 6a26f49d3b16 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Nov 17 10:24:22 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Nov 17 10:29:08 2006 +0000
@@ -78,75 +78,48 @@ static void vmx_vcpu_destroy(struct vcpu
 
 #ifdef __x86_64__
 
-static DEFINE_PER_CPU(struct vmx_msr_state, percpu_msr);
-
-static u32 msr_data_index[VMX_MSR_COUNT] =
+static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
+
+static u32 msr_index[VMX_MSR_COUNT] =
 {
     MSR_LSTAR, MSR_STAR, MSR_CSTAR,
     MSR_SYSCALL_MASK, MSR_EFER,
 };
 
-static void vmx_save_segments(struct vcpu *v)
-{
-    rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_content.shadow_gs);
-}
-
-/*
- * To avoid MSR save/restore at every VM exit/entry time, we restore
- * the x86_64 specific MSRs at domain switch time. Since those MSRs are
- * are not modified once set for generic domains, we don't save them,
- * but simply reset them to the values set at percpu_traps_init().
- */
-static void vmx_load_msrs(void)
-{
-    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
+static void vmx_save_host_msrs(void)
+{
+    struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
     int i;
 
-    while ( host_state->flags )
-    {
-        i = find_first_set_bit(host_state->flags);
-        wrmsrl(msr_data_index[i], host_state->msr_items[i]);
-        clear_bit(i, &host_state->flags);
-    }
-}
-
-static void vmx_save_init_msrs(void)
-{
-    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
-    int i;
-
     for ( i = 0; i < VMX_MSR_COUNT; i++ )
-        rdmsrl(msr_data_index[i], host_state->msr_items[i]);
-}
-
-#define CASE_READ_MSR(address)              \
-    case MSR_ ## address:                 \
-    msr_content = msr->msr_items[VMX_INDEX_MSR_ ## address]; \
-    break
-
-#define CASE_WRITE_MSR(address)                                     \
-    case MSR_ ## address:                                           \
-    {                                                               \
-        msr->msr_items[VMX_INDEX_MSR_ ## address] = msr_content;    \
-        if (!test_bit(VMX_INDEX_MSR_ ## address, &msr->flags)) {    \
-            set_bit(VMX_INDEX_MSR_ ## address, &msr->flags);        \
-        }                                                           \
-        wrmsrl(MSR_ ## address, msr_content);                       \
-        set_bit(VMX_INDEX_MSR_ ## address, &host_state->flags);     \
-    }                                                               \
-    break
+        rdmsrl(msr_index[i], host_msr_state->msrs[i]);
+}
+
+#define CASE_READ_MSR(address)                                              \
+    case MSR_ ## address:                                                   \
+        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_ ## address];     \
+        break
+
+#define CASE_WRITE_MSR(address)                                             \
+    case MSR_ ## address:                                                   \
+        guest_msr_state->msrs[VMX_INDEX_MSR_ ## address] = msr_content;     \
+        if ( !test_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags) )\
+            set_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags);    \
+        wrmsrl(MSR_ ## address, msr_content);                               \
+        set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags);         \
+        break
 
 #define IS_CANO_ADDRESS(add) 1
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
 {
     u64 msr_content = 0;
     struct vcpu *v = current;
-    struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
+    struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
 
     switch ( regs->ecx ) {
     case MSR_EFER:
         HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
-        msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
+        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_EFER];
         break;
 
     case MSR_FS_BASE:
@@ -164,7 +137,7 @@ static inline int long_mode_do_msr_read(
         break;
 
     case MSR_SHADOW_GS_BASE:
-        msr_content = msr->shadow_gs;
+        msr_content = guest_msr_state->shadow_gs;
         break;
 
     CASE_READ_MSR(STAR);
@@ -193,8 +166,8 @@ static inline int long_mode_do_msr_write
 {
     u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
     struct vcpu *v = current;
-    struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
-    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
+    struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
+    struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
 
     HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%lx msr_content 0x%"PRIx64"\n",
                 (unsigned long)regs->ecx, msr_content);
@@ -211,7 +184,7 @@ static inline int long_mode_do_msr_write
         }
 
         if ( (msr_content & EFER_LME)
-             &&  !(msr->msr_items[VMX_INDEX_MSR_EFER] & EFER_LME) )
+             &&  !(guest_msr_state->msrs[VMX_INDEX_MSR_EFER] & EFER_LME) )
         {
             if ( unlikely(vmx_paging_enabled(v)) )
             {
@@ -221,7 +194,7 @@ static inline int long_mode_do_msr_write
             }
         }
         else if ( !(msr_content & EFER_LME)
-                  && (msr->msr_items[VMX_INDEX_MSR_EFER] & EFER_LME) )
+                  && (guest_msr_state->msrs[VMX_INDEX_MSR_EFER] & EFER_LME) )
         {
             if ( unlikely(vmx_paging_enabled(v)) )
             {
@@ -231,12 +204,12 @@ static inline int long_mode_do_msr_write
             }
         }
 
-        msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
+        guest_msr_state->msrs[VMX_INDEX_MSR_EFER] = msr_content;
         break;
 
     case MSR_FS_BASE:
     case MSR_GS_BASE:
-        if ( !(vmx_long_mode_enabled(v)) )
+        if ( !vmx_long_mode_enabled(v) )
             goto exit_and_crash;
 
         if ( !IS_CANO_ADDRESS(msr_content) )
@@ -257,7 +230,7 @@ static inline int long_mode_do_msr_write
         if ( !(vmx_long_mode_enabled(v)) )
             goto exit_and_crash;
 
-        v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
+        v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
         wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
         break;
 
@@ -278,40 +251,57 @@ static inline int long_mode_do_msr_write
     return 1; /* handled */
 }
 
-static void vmx_restore_msrs(struct vcpu *v)
-{
-    int i = 0;
-    struct vmx_msr_state *guest_state;
-    struct vmx_msr_state *host_state;
-    unsigned long guest_flags ;
-
-    guest_state = &v->arch.hvm_vmx.msr_content;;
-    host_state = &this_cpu(percpu_msr);
-
-    wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
-    guest_flags = guest_state->flags;
-    if (!guest_flags)
+/*
+ * To avoid MSR save/restore at every VM exit/entry time, we restore
+ * the x86_64 specific MSRs at domain switch time. Since these MSRs
+ * are not modified once set for para domains, we don't save them,
+ * but simply reset them to values set in percpu_traps_init().
+ */
+static void vmx_restore_host_msrs(void)
+{
+    struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
+    int i;
+
+    while ( host_msr_state->flags )
+    {
+        i = find_first_set_bit(host_msr_state->flags);
+        wrmsrl(msr_index[i], host_msr_state->msrs[i]);
+        clear_bit(i, &host_msr_state->flags);
+    }
+}
+
+static void vmx_restore_guest_msrs(struct vcpu *v)
+{
+    struct vmx_msr_state *guest_msr_state, *host_msr_state;
+    unsigned long guest_flags;
+    int i;
+
+    guest_msr_state = &v->arch.hvm_vmx.msr_state;
+    host_msr_state = &this_cpu(host_msr_state);
+
+    wrmsrl(MSR_SHADOW_GS_BASE, guest_msr_state->shadow_gs);
+
+    guest_flags = guest_msr_state->flags;
+    if ( !guest_flags )
         return;
 
-    while (guest_flags){
+    while ( guest_flags ) {
         i = find_first_set_bit(guest_flags);
 
         HVM_DBG_LOG(DBG_LEVEL_2,
-                    "restore guest's index %d msr %lx with %lx\n",
-                    i, (unsigned long)msr_data_index[i],
-                    (unsigned long)guest_state->msr_items[i]);
-        set_bit(i, &host_state->flags);
-        wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
+                    "restore guest's index %d msr %x with value %lx",
+                    i, msr_index[i], guest_msr_state->msrs[i]);
+        set_bit(i, &host_msr_state->flags);
+        wrmsrl(msr_index[i], guest_msr_state->msrs[i]);
         clear_bit(i, &guest_flags);
     }
 }
 
 #else  /* __i386__ */
 
-#define vmx_save_segments(v)      ((void)0)
-#define vmx_load_msrs()           ((void)0)
-#define vmx_restore_msrs(v)       ((void)0)
-#define vmx_save_init_msrs()      ((void)0)
+#define vmx_save_host_msrs()        ((void)0)
+#define vmx_restore_host_msrs()     ((void)0)
+#define vmx_restore_guest_msrs(v)   ((void)0)
 
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
 {
@@ -325,9 +315,9 @@ static inline int long_mode_do_msr_write
 
 #endif /* __i386__ */
 
-#define loaddebug(_v,_reg) \
+#define loaddebug(_v,_reg)  \
     __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-#define savedebug(_v,_reg) \
+#define savedebug(_v,_reg)  \
     __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" 
((_v)->debugreg[_reg]))
 
 static inline void vmx_save_dr(struct vcpu *v)
@@ -374,34 +364,21 @@ static inline void vmx_restore_dr(struct
         __restore_debug_registers(v);
 }
 
-static void vmx_freeze_time(struct vcpu *v)
-{
-    struct hvm_domain *plat = &v->domain->arch.hvm_domain;
-    struct periodic_time *pt = &plat->pl_time.periodic_tm;
-
-    if ( pt->enabled && pt->first_injected
-            && (v->vcpu_id == pt->bind_vcpu)
-            && !v->arch.hvm_vcpu.guest_time ) {
-        v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
-        if ( !test_bit(_VCPUF_blocked, &v->vcpu_flags) )
-        {
-            stop_timer(&pt->timer);
-            rtc_freeze(v);
-        }
-    }
-}
-
 static void vmx_ctxt_switch_from(struct vcpu *v)
 {
-    vmx_freeze_time(v);
-    vmx_save_segments(v);
-    vmx_load_msrs();
+    hvm_freeze_time(v);
+
+    /* NB. MSR_SHADOW_GS_BASE may be changed by the swapgs instruction in
+     * the guest, so we must save it. */
+    rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_state.shadow_gs);
+
+    vmx_restore_host_msrs();
     vmx_save_dr(v);
 }
 
 static void vmx_ctxt_switch_to(struct vcpu *v)
 {
-    vmx_restore_msrs(v);
+    vmx_restore_guest_msrs(v);
     vmx_restore_dr(v);
 }
 
@@ -409,6 +386,7 @@ static void stop_vmx(void)
 {
     if ( !(read_cr4() & X86_CR4_VMXE) )
         return;
+
     __vmxoff();
     clear_in_cr4(X86_CR4_VMXE);
 }
@@ -706,7 +684,7 @@ int start_vmx(void)
 
     printk("VMXON is done\n");
 
-    vmx_save_init_msrs();
+    vmx_save_host_msrs();
 
     vmx_setup_hvm_funcs();
 
@@ -843,14 +821,14 @@ static void vmx_do_cpuid(struct cpu_user
 
             if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
                 clear_bit(X86_FEATURE_APIC, &edx);
-    
+
 #if CONFIG_PAGING_LEVELS >= 3
             if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
 #endif
                 clear_bit(X86_FEATURE_PAE, &edx);
             clear_bit(X86_FEATURE_PSE36, &edx);
 
-            ebx &= NUM_THREADS_RESET_MASK;  
+            ebx &= NUM_THREADS_RESET_MASK;
 
             /* Unsupportable for virtualised CPUs. */
             ecx &= ~(bitmaskof(X86_FEATURE_VMXE)  |
@@ -863,7 +841,7 @@ static void vmx_do_cpuid(struct cpu_user
                      bitmaskof(X86_FEATURE_ACPI)  |
                      bitmaskof(X86_FEATURE_ACC) );
         }
-        else if (  ( input == CPUID_LEAF_0x6 ) 
+        else if (  ( input == CPUID_LEAF_0x6 )
                 || ( input == CPUID_LEAF_0x9 )
                 || ( input == CPUID_LEAF_0xA ))
         {
@@ -1319,7 +1297,7 @@ static int vmx_assist(struct vcpu *v, in
                 goto error;
             if ( vmx_world_restore(v, &c) != 0 )
                 goto error;
-            v->arch.hvm_vmx.vmxassist_enabled = 1;            
+            v->arch.hvm_vmx.vmxassist_enabled = 1;
             return 1;
         }
         break;
@@ -1389,7 +1367,7 @@ static int vmx_set_cr0(unsigned long val
         mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
         if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
         {
-            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
+            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
                      v->arch.hvm_vmx.cpu_cr3, mfn);
             domain_crash(v->domain);
             return 0;
@@ -1404,10 +1382,10 @@ static int vmx_set_cr0(unsigned long val
                             "with EFER.LME set but not CR4.PAE\n");
                 vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
             }
-            else 
+            else
             {
                 HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
-                v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
+                v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER]
                     |= EFER_LMA;
                 vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
                 vm_entry_value |= VM_ENTRY_IA32E_MODE;
@@ -1461,7 +1439,7 @@ static int vmx_set_cr0(unsigned long val
              */
             if ( vmx_long_mode_enabled(v) )
             {
-                v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
+                v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER]
                     &= ~EFER_LMA;
                 vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
                 vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
@@ -1494,8 +1472,7 @@ static int vmx_set_cr0(unsigned long val
     {
         if ( vmx_long_mode_enabled(v) )
         {
-            v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
-              &= ~EFER_LMA;
+            v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER] &= ~EFER_LMA;
             vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
             vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
@@ -1853,8 +1830,8 @@ static inline void vmx_do_msr_write(stru
         {
             struct periodic_time *pt =
                 &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
-            if ( pt->enabled && pt->first_injected 
-                    && v->vcpu_id == pt->bind_vcpu ) 
+            if ( pt->enabled && pt->first_injected
+                    && v->vcpu_id == pt->bind_vcpu )
                 pt->first_injected = 0;
         }
         hvm_set_guest_time(v, msr_content);
@@ -1963,7 +1940,7 @@ void store_cpu_user_regs(struct cpu_user
     regs->es = __vmread(GUEST_ES_SELECTOR);
     regs->eip = __vmread(GUEST_RIP);
 }
-#endif 
+#endif
 
 #ifdef XEN_DEBUGGER
 void save_cpu_user_regs(struct cpu_user_regs *regs)
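
The renamed vmx_restore_host_msrs()/vmx_restore_guest_msrs() pair keeps the
lazy scheme: a flags bitmap records which MSR slots the guest dirtied, so
only those slots are rewritten on the way back.  The bookkeeping pattern as
a standalone sketch (mock names throughout; the GCC/Clang builtin
__builtin_ctzl stands in for find_first_set_bit(), mock_wrmsr for wrmsrl()):

    #include <stdio.h>

    #define MSR_COUNT 5               /* mirrors VMX_MSR_COUNT */

    struct msr_state {
        unsigned long flags;          /* bit i set => slot i is dirty */
        unsigned long msrs[MSR_COUNT];
    };

    static void mock_wrmsr(int i, unsigned long val)
    {
        printf("wrmsr slot %d <- %#lx\n", i, val);  /* stand-in for wrmsrl() */
    }

    static void restore_marked(struct msr_state *s)
    {
        while (s->flags) {
            int i = __builtin_ctzl(s->flags);       /* find_first_set_bit() */
            mock_wrmsr(i, s->msrs[i]);
            s->flags &= ~(1UL << i);                /* clear_bit() */
        }
    }

    int main(void)
    {
        struct msr_state s = { .flags = 0x5, .msrs = { 0x10, 0, 0x30 } };
        restore_marked(&s);           /* rewrites slots 0 and 2 only */
        return 0;
    }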
diff -r d19b8542865b -r 6a26f49d3b16 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri Nov 17 10:24:22 2006 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Fri Nov 17 10:29:08 2006 +0000
@@ -97,6 +97,8 @@ int hvm_vcpu_initialise(struct vcpu *v);
 int hvm_vcpu_initialise(struct vcpu *v);
 void hvm_vcpu_destroy(struct vcpu *v);
 
+void hvm_send_assist_req(struct vcpu *v);
+
 static inline void
 hvm_store_cpu_guest_regs(
     struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
@@ -161,6 +163,7 @@ hvm_get_guest_ctrl_reg(struct vcpu *v, u
 
 void hvm_stts(struct vcpu *v);
 void hvm_set_guest_time(struct vcpu *v, u64 gtime);
+void hvm_freeze_time(struct vcpu *v);
 void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
 
diff -r d19b8542865b -r 6a26f49d3b16 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Fri Nov 17 10:24:22 2006 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Fri Nov 17 10:29:08 2006 +0000
@@ -41,12 +41,12 @@ enum {
     VMX_INDEX_MSR_SYSCALL_MASK,
     VMX_INDEX_MSR_EFER,
 
-    VMX_MSR_COUNT,
+    VMX_MSR_COUNT
 };
 
 struct vmx_msr_state {
     unsigned long flags;
-    unsigned long msr_items[VMX_MSR_COUNT];
+    unsigned long msrs[VMX_MSR_COUNT];
     unsigned long shadow_gs;
 };
 
@@ -76,8 +76,8 @@ struct arch_vmx_struct {
     unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
     unsigned long        cpu_cr2; /* save CR2 */
     unsigned long        cpu_cr3;
-    struct vmx_msr_state msr_content;
-    unsigned long        vmxassist_enabled:1; 
+    struct vmx_msr_state msr_state;
+    unsigned long        vmxassist_enabled:1;
 };
 
 #define vmx_schedule_tail(next)         \
@@ -141,10 +141,10 @@ enum vmcs_field {
     HOST_FS_SELECTOR                = 0x00000c08,
     HOST_GS_SELECTOR                = 0x00000c0a,
     HOST_TR_SELECTOR                = 0x00000c0c,
-    IO_BITMAP_A                     = 0x00002000, 
-    IO_BITMAP_A_HIGH                = 0x00002001, 
-    IO_BITMAP_B                     = 0x00002002, 
-    IO_BITMAP_B_HIGH                = 0x00002003, 
+    IO_BITMAP_A                     = 0x00002000,
+    IO_BITMAP_A_HIGH                = 0x00002001,
+    IO_BITMAP_B                     = 0x00002002,
+    IO_BITMAP_B_HIGH                = 0x00002003,
     VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
     VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
     VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
@@ -160,7 +160,7 @@ enum vmcs_field {
     GUEST_IA32_DEBUGCTL             = 0x00002802,
     GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
     PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
-    CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,   
+    CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
     EXCEPTION_BITMAP                = 0x00004004,
     PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
     PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
@@ -177,7 +177,7 @@ enum vmcs_field {
     SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
     VM_INSTRUCTION_ERROR            = 0x00004400,
     VM_EXIT_REASON                  = 0x00004402,
-    VM_EXIT_INTR_INFO               = 0x00004404,   
+    VM_EXIT_INTR_INFO               = 0x00004404,
     VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
     IDT_VECTORING_INFO_FIELD        = 0x00004408,
     IDT_VECTORING_ERROR_CODE        = 0x0000440a,
@@ -209,10 +209,10 @@ enum vmcs_field {
     CR4_GUEST_HOST_MASK             = 0x00006002,
     CR0_READ_SHADOW                 = 0x00006004,
     CR4_READ_SHADOW                 = 0x00006006,
-    CR3_TARGET_VALUE0               = 0x00006008, 
-    CR3_TARGET_VALUE1               = 0x0000600a, 
-    CR3_TARGET_VALUE2               = 0x0000600c, 
-    CR3_TARGET_VALUE3               = 0x0000600e, 
+    CR3_TARGET_VALUE0               = 0x00006008,
+    CR3_TARGET_VALUE1               = 0x0000600a,
+    CR3_TARGET_VALUE2               = 0x0000600c,
+    CR3_TARGET_VALUE3               = 0x0000600e,
     EXIT_QUALIFICATION              = 0x00006400,
     GUEST_LINEAR_ADDRESS            = 0x0000640a,
     GUEST_CR0                       = 0x00006800,
@@ -226,7 +226,7 @@ enum vmcs_field {
     GUEST_GS_BASE                   = 0x00006810,
     GUEST_LDTR_BASE                 = 0x00006812,
     GUEST_TR_BASE                   = 0x00006814,
-    GUEST_GDTR_BASE                 = 0x00006816,    
+    GUEST_GDTR_BASE                 = 0x00006816,
     GUEST_IDTR_BASE                 = 0x00006818,
     GUEST_DR7                       = 0x0000681a,
     GUEST_RSP                       = 0x0000681c,
diff -r d19b8542865b -r 6a26f49d3b16 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Fri Nov 17 10:24:22 2006 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Fri Nov 17 10:29:08 2006 +0000
@@ -262,13 +262,13 @@ static inline int vmx_paging_enabled(str
 
 static inline int vmx_long_mode_enabled(struct vcpu *v)
 {
-    u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
+    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
     return efer & EFER_LMA;
 }
 
 static inline int vmx_lme_is_set(struct vcpu *v)
 {
-    u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
+    u64 efer = v->arch.hvm_vmx.msr_state.msrs[VMX_INDEX_MSR_EFER];
     return efer & EFER_LME;
 }
 
