
[Xen-devel] [PATCH][Xen 3.3.1 mq tree] Add cs 15185



Add the contents of cs 15185, update the series file, and refresh the affected patches.
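
For context, cs 15185 enables the memory-mapped vLAPIC/vTPR access optimization: it turns on the VMX secondary execution control SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES (gated by ACTIVATE_SECONDARY_CONTROLS in the primary controls), maps an APIC-access page for the guest, and handles the new EXIT_REASON_APIC_ACCESS vmexit. Below is a minimal, self-contained sketch of how the secondary-control word is derived from the VMX capability MSR; the constants mirror the ones the patch adds, while the hard-coded capability words are placeholders standing in for a real rdmsr of MSR_IA32_VMX_PROCBASED_CTLS2, which is only meaningful inside the hypervisor.

    /* Sketch only: mimics adjust_vmx_controls() for the secondary controls.
     * Allowed-0 settings sit in the low half of the capability MSR,
     * allowed-1 settings in the high half. */
    #include <stdint.h>
    #include <stdio.h>

    #define MSR_IA32_VMX_PROCBASED_CTLS2            0x48b       /* added by the patch */
    #define ACTIVATE_SECONDARY_CONTROLS             0x80000000u /* primary ctl bit 31 */
    #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001u /* secondary ctl bit 0 */

    static uint32_t adjust_controls(uint32_t min, uint32_t opt,
                                    uint32_t msr_low, uint32_t msr_high)
    {
        uint32_t ctl = min | opt;
        ctl &= msr_high;  /* drop optional bits the CPU does not support */
        ctl |= msr_low;   /* force bits the CPU requires to be set */
        return ctl;
    }

    int main(void)
    {
        /* Placeholder capability words; in the hypervisor these come from
         * rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, low, high). */
        uint32_t cap_low  = 0;
        uint32_t cap_high = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;

        uint32_t secondary = adjust_controls(
            0, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, cap_low, cap_high);

        if (secondary & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
            printf("mmap vTPR optimization usable (set bit 0x%08x "
                   "in the primary controls first)\n",
                   ACTIVATE_SECONDARY_CONTROLS);
        return 0;
    }

The actual patch follows unchanged.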
# HG changeset patch
# User Ben Guthro <bguthro@xxxxxxxxxxxxxxx>
# Date 1189533473 14400
# Node ID 12113e3eecad87f6e66019f1be323773066b566b
# Parent  666b65529853822aea6093a0e24d44ccd75babec
add cs 15185, and update affected patches

diff -r 666b65529853 -r 12113e3eecad 15185-1f8fb764f843
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/15185-1f8fb764f843        Tue Sep 11 13:57:53 2007 -0400
@@ -0,0 +1,432 @@
+diff -r 8f147a735faf xen/arch/x86/hvm/hvm.c
+--- a/xen/arch/x86/hvm/hvm.c   Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/arch/x86/hvm/hvm.c   Fri Aug 17 14:47:32 2007 -0400
+@@ -224,6 +224,7 @@ int hvm_domain_initialise(struct domain 
+ 
+     spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
+     spin_lock_init(&d->arch.hvm_domain.irq_lock);
++    spin_lock_init(&d->arch.hvm_domain.vapic_access_lock);
+ 
+     rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
+     if ( rc != 0 )
+diff -r 8f147a735faf xen/arch/x86/hvm/vlapic.c
+--- a/xen/arch/x86/hvm/vlapic.c        Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/arch/x86/hvm/vlapic.c        Fri Aug 17 14:47:32 2007 -0400
+@@ -79,8 +79,6 @@ static unsigned int vlapic_lvt_mask[VLAP
+ #define vlapic_lvtt_period(vlapic)                              \
+     (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC)
+ 
+-#define vlapic_base_address(vlapic)                             \
+-    (vlapic->hw.apic_base_msr & MSR_IA32_APICBASE_BASE)
+ 
+ /*
+  * Generic APIC bitmap vector update & search routines.
+diff -r 8f147a735faf xen/arch/x86/hvm/vmx/intr.c
+--- a/xen/arch/x86/hvm/vmx/intr.c      Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/arch/x86/hvm/vmx/intr.c      Fri Aug 17 14:47:32 2007 -0400
+@@ -67,13 +67,17 @@ static inline int is_interruptibility_st
+     return __vmread(GUEST_INTERRUPTIBILITY_INFO);
+ }
+ 
+-#ifdef __x86_64__
+ static void update_tpr_threshold(struct vlapic *vlapic)
+ {
+     int max_irr, tpr;
+ 
+     if ( !cpu_has_vmx_tpr_shadow )
+         return;
++
++#ifdef __i386__
++    if ( !vlapic->mmap_vtpr_enabled )
++        return;
++#endif
+ 
+     if ( !vlapic_enabled(vlapic) || 
+          ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
+@@ -85,9 +89,6 @@ static void update_tpr_threshold(struct 
+     tpr = vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xF0;
+     __vmwrite(TPR_THRESHOLD, (max_irr > tpr) ? (tpr >> 4) : (max_irr >> 4));
+ }
+-#else
+-#define update_tpr_threshold(v) ((void)0)
+-#endif
+ 
+ asmlinkage void vmx_intr_assist(void)
+ {
+diff -r 8f147a735faf xen/arch/x86/hvm/vmx/vmcs.c
+--- a/xen/arch/x86/hvm/vmx/vmcs.c      Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/arch/x86/hvm/vmx/vmcs.c      Fri Aug 17 14:47:32 2007 -0400
+@@ -40,6 +40,7 @@
+ /* Dynamic (run-time adjusted) execution control flags. */
+ u32 vmx_pin_based_exec_control __read_mostly;
+ u32 vmx_cpu_based_exec_control __read_mostly;
++u32 vmx_secondary_exec_control __read_mostly;
+ u32 vmx_vmexit_control __read_mostly;
+ u32 vmx_vmentry_control __read_mostly;
+ 
+@@ -59,12 +60,16 @@ static u32 adjust_vmx_controls(u32 ctl_m
+ 
+     return ctl;
+ }
++
++#define vmx_has_secondary_exec_ctls \
++    (_vmx_cpu_based_exec_control & ACTIVATE_SECONDARY_CONTROLS)
+ 
+ void vmx_init_vmcs_config(void)
+ {
+     u32 vmx_msr_low, vmx_msr_high, min, opt;
+     u32 _vmx_pin_based_exec_control;
+     u32 _vmx_cpu_based_exec_control;
++    u32 _vmx_secondary_exec_control = 0;
+     u32 _vmx_vmexit_control;
+     u32 _vmx_vmentry_control;
+ 
+@@ -80,9 +85,8 @@ void vmx_init_vmcs_config(void)
+            CPU_BASED_ACTIVATE_IO_BITMAP |
+            CPU_BASED_USE_TSC_OFFSETING);
+     opt = CPU_BASED_ACTIVATE_MSR_BITMAP;
+-#ifdef __x86_64__
+     opt |= CPU_BASED_TPR_SHADOW;
+-#endif
++    opt |= ACTIVATE_SECONDARY_CONTROLS;
+     _vmx_cpu_based_exec_control = adjust_vmx_controls(
+         min, opt, MSR_IA32_VMX_PROCBASED_CTLS_MSR);
+ #ifdef __x86_64__
+@@ -92,7 +96,18 @@ void vmx_init_vmcs_config(void)
+         _vmx_cpu_based_exec_control = adjust_vmx_controls(
+             min, opt, MSR_IA32_VMX_PROCBASED_CTLS_MSR);
+     }
++#elif defined(__i386__)
++    if ( !vmx_has_secondary_exec_ctls )
++        _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
+ #endif
++
++    if ( vmx_has_secondary_exec_ctls )
++    {
++        min = 0;
++        opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
++        _vmx_secondary_exec_control = adjust_vmx_controls(
++            min, opt, MSR_IA32_VMX_PROCBASED_CTLS2);
++    }
+ 
+     min = VM_EXIT_ACK_INTR_ON_EXIT;
+     opt = 0;
+@@ -113,6 +128,8 @@ void vmx_init_vmcs_config(void)
+         vmcs_revision_id = vmx_msr_low;
+         vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
+         vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
++        if ( vmx_has_secondary_exec_ctls )
++            vmx_secondary_exec_control = _vmx_secondary_exec_control;
+         vmx_vmexit_control         = _vmx_vmexit_control;
+         vmx_vmentry_control        = _vmx_vmentry_control;
+     }
+@@ -121,6 +138,8 @@ void vmx_init_vmcs_config(void)
+         BUG_ON(vmcs_revision_id != vmx_msr_low);
+         BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
+         BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
++        if ( vmx_has_secondary_exec_ctls )
++            BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
+         BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
+         BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
+     }
+@@ -296,6 +315,8 @@ static void construct_vmcs(struct vcpu *
+     __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
+     __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
+     v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
++    if ( vmx_cpu_based_exec_control & ACTIVATE_SECONDARY_CONTROLS )
++        __vmwrite(SECONDARY_VM_EXEC_CONTROL, vmx_secondary_exec_control);
+ 
+     if ( cpu_has_vmx_msr_bitmap )
+         __vmwrite(MSR_BITMAP, virt_to_maddr(vmx_msr_bitmap));
+@@ -422,7 +443,7 @@ static void construct_vmcs(struct vcpu *
+     __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+ 
+ #ifdef __x86_64__ 
+-    /* VLAPIC TPR optimisation. */
++    /* CR8 based VLAPIC TPR optimization. */
+     if ( cpu_has_vmx_tpr_shadow )
+     {
+         __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
+@@ -430,6 +451,16 @@ static void construct_vmcs(struct vcpu *
+         __vmwrite(TPR_THRESHOLD, 0);
+     }
+ #endif
++
++    /* Memory-mapped based VLAPIC TPR optimization. */
++    if ( cpu_has_vmx_mmap_vtpr_optimization )
++    {
++        __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
++                    page_to_maddr(vcpu_vlapic(v)->regs_page));
++        __vmwrite(TPR_THRESHOLD, 0);
++
++        vcpu_vlapic(v)->mmap_vtpr_enabled = 1;
++    }
+ 
+     __vmwrite(GUEST_LDTR_SELECTOR, 0);
+     __vmwrite(GUEST_LDTR_BASE, 0);
+@@ -499,6 +530,18 @@ void vmx_do_resume(struct vcpu *v)
+         vmx_load_vmcs(v);
+         hvm_migrate_timers(v);
+         vmx_set_host_env(v);
++    }
++
++    if ( !v->arch.hvm_vmx.launched && vcpu_vlapic(v)->mmap_vtpr_enabled )
++    {
++        struct page_info *pg = change_guest_physmap_for_vtpr(v->domain, 1);
++
++        if ( pg == NULL )
++        {
++            gdprintk(XENLOG_ERR, "change_guest_physmap_for_vtpr failed!\n");
++            domain_crash_synchronous();
++        }
++        __vmwrite(APIC_ACCESS_ADDR, page_to_maddr(pg));
+     }
+ 
+     debug_state = v->domain->debugger_attached;
+diff -r 8f147a735faf xen/arch/x86/hvm/vmx/vmx.c
+--- a/xen/arch/x86/hvm/vmx/vmx.c       Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/arch/x86/hvm/vmx/vmx.c       Fri Aug 17 14:47:32 2007 -0400
+@@ -2593,6 +2593,114 @@ done:
+     return 1;
+ }
+ 
++struct page_info * change_guest_physmap_for_vtpr(struct domain *d,
++                                                 int enable_vtpr)
++{
++    struct page_info *pg;
++    unsigned long pfn, mfn;
++
++    spin_lock(&d->arch.hvm_domain.vapic_access_lock);
++
++    pg = d->arch.hvm_domain.apic_access_page;
++    pfn = paddr_to_pfn(APIC_DEFAULT_PHYS_BASE);
++
++    if ( enable_vtpr )
++    {
++        if ( d->arch.hvm_domain.physmap_changed_for_vlapic_access )
++            goto out;
++
++        if ( pg == NULL )
++            pg = alloc_domheap_page(d);
++        if ( pg == NULL )
++        {
++            gdprintk(XENLOG_ERR, "alloc_domheap_pages() failed!\n");
++            goto out;
++        }
++
++        mfn = page_to_mfn(pg);
++        d->arch.hvm_domain.apic_access_page = pg;
++
++        guest_physmap_add_page(d, pfn, mfn);
++
++        d->arch.hvm_domain.physmap_changed_for_vlapic_access = 1;
++
++        goto out;
++    }
++    else
++    {
++        if ( d->arch.hvm_domain.physmap_changed_for_vlapic_access )
++        {
++            mfn = page_to_mfn(pg);
++            guest_physmap_remove_page(d, pfn, mfn);
++            flush_tlb_mask(d->domain_dirty_cpumask);
++
++            d->arch.hvm_domain.physmap_changed_for_vlapic_access = 0;
++        }
++        pg = NULL;
++        goto out;
++    }
++
++out:
++    spin_unlock(&d->arch.hvm_domain.vapic_access_lock);
++    return pg;
++}
++
++static void check_vlapic_msr_for_vtpr(struct vcpu *v)
++{
++    struct vlapic *vlapic = vcpu_vlapic(v);
++    int    mmap_vtpr_enabled = vcpu_vlapic(v)->mmap_vtpr_enabled;
++    uint32_t tmp;
++
++
++    if ( vlapic_hw_disabled(vlapic) && mmap_vtpr_enabled )
++    {
++        vcpu_vlapic(v)->mmap_vtpr_enabled = 0;    
++
++#ifdef __i386__
++        v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
++        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
++                  v->arch.hvm_vcpu.u.vmx.exec_control);
++#elif defined(__x86_64__)
++        if ( !cpu_has_vmx_tpr_shadow )
++        {
++            v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
++            __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
++                v->arch.hvm_vcpu.u.vmx.exec_control);
++        }
++#endif
++        tmp  = __vmread(SECONDARY_VM_EXEC_CONTROL);
++        tmp &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
++        __vmwrite(SECONDARY_VM_EXEC_CONTROL, tmp);
++
++        change_guest_physmap_for_vtpr(v->domain, 0);
++    }
++    else if ( !vlapic_hw_disabled(vlapic) && !mmap_vtpr_enabled &&
++              cpu_has_vmx_mmap_vtpr_optimization )
++    {
++        vcpu_vlapic(v)->mmap_vtpr_enabled = 1;
++
++        v->arch.hvm_vcpu.u.vmx.exec_control |=
++            ( ACTIVATE_SECONDARY_CONTROLS | CPU_BASED_TPR_SHADOW );
++        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
++                  v->arch.hvm_vcpu.u.vmx.exec_control);
++        tmp  = __vmread(SECONDARY_VM_EXEC_CONTROL);
++        tmp |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
++        __vmwrite(SECONDARY_VM_EXEC_CONTROL, tmp);
++
++        change_guest_physmap_for_vtpr(v->domain, 1);
++    }
++
++    if ( vcpu_vlapic(v)->mmap_vtpr_enabled &&
++        !vlapic_hw_disabled(vlapic) &&
++        (vlapic_base_address(vlapic) != APIC_DEFAULT_PHYS_BASE) )
++    {
++        gdprintk(XENLOG_ERR,
++                 "Local APIC base address is set to 0x%016"PRIx64"!\n",
++                  vlapic_base_address(vlapic));
++        domain_crash_synchronous();
++    }
++}
++
+ static inline int vmx_do_msr_write(struct cpu_user_regs *regs)
+ {
+     u32 ecx = regs->ecx;
+@@ -2621,6 +2729,7 @@ static inline int vmx_do_msr_write(struc
+         break;
+     case MSR_IA32_APICBASE:
+         vlapic_msr_set(vcpu_vlapic(v), msr_content);
++        check_vlapic_msr_for_vtpr(v);
+         break;
+     default:
+         if ( !long_mode_do_msr_write(regs) )
+@@ -2955,6 +3064,15 @@ asmlinkage void vmx_vmexit_handler(struc
+ 
+     case EXIT_REASON_TPR_BELOW_THRESHOLD:
+         break;
++    case EXIT_REASON_APIC_ACCESS:
++    {
++        unsigned long offset;
++
++        exit_qualification = __vmread(EXIT_QUALIFICATION);
++        offset = exit_qualification & 0x0fffUL;        
++        handle_mmio(APIC_DEFAULT_PHYS_BASE | offset);
++        break;
++    }
+ 
+     default:
+     exit_and_crash:
+diff -r 8f147a735faf xen/include/asm-x86/hvm/domain.h
+--- a/xen/include/asm-x86/hvm/domain.h Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/include/asm-x86/hvm/domain.h Fri Aug 17 14:47:32 2007 -0400
+@@ -41,6 +41,11 @@ struct hvm_domain {
+     s64                    tsc_frequency;
+     struct pl_time         pl_time;
+ 
++    /* For memory-mapped vLAPIC/vTPR access optimization */
++    spinlock_t             vapic_access_lock;
++    int                    physmap_changed_for_vlapic_access : 1;
++    struct page_info       *apic_access_page;
++
+     struct hvm_io_handler  io_handler;
+ 
+     /* Lock protects access to irq, vpic and vioapic. */
+diff -r 8f147a735faf xen/include/asm-x86/hvm/vlapic.h
+--- a/xen/include/asm-x86/hvm/vlapic.h Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/include/asm-x86/hvm/vlapic.h Fri Aug 17 14:47:32 2007 -0400
+@@ -49,12 +49,17 @@
+ #define vlapic_disabled(vlapic)    ((vlapic)->hw.disabled)
+ #define vlapic_enabled(vlapic)     (!vlapic_disabled(vlapic))
+ 
++#define vlapic_base_address(vlapic)                             \
++    (vlapic->hw.apic_base_msr & MSR_IA32_APICBASE_BASE)
++
+ struct vlapic {
+     struct hvm_hw_lapic      hw;
+     struct hvm_hw_lapic_regs *regs;
+     struct periodic_time     pt;
+     s_time_t                 timer_last_update;
+     struct page_info         *regs_page;
++
++    int                      mmap_vtpr_enabled : 1;
+ };
+ 
+ static inline uint32_t vlapic_get_reg(struct vlapic *vlapic, uint32_t reg)
+diff -r 8f147a735faf xen/include/asm-x86/hvm/vmx/vmcs.h
+--- a/xen/include/asm-x86/hvm/vmx/vmcs.h       Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/include/asm-x86/hvm/vmx/vmcs.h       Fri Aug 17 14:47:32 2007 -0400
+@@ -104,6 +104,7 @@ void vmx_vmcs_exit(struct vcpu *v);
+ #define CPU_BASED_ACTIVATE_MSR_BITMAP   0x10000000
+ #define CPU_BASED_MONITOR_EXITING       0x20000000
+ #define CPU_BASED_PAUSE_EXITING         0x40000000
++#define ACTIVATE_SECONDARY_CONTROLS     0x80000000
+ extern u32 vmx_cpu_based_exec_control;
+ 
+ #define PIN_BASED_EXT_INTR_MASK         0x00000001
+@@ -119,8 +120,16 @@ extern u32 vmx_vmexit_control;
+ #define VM_ENTRY_DEACT_DUAL_MONITOR     0x00000800
+ extern u32 vmx_vmentry_control;
+ 
++#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
++extern u32 vmx_secondary_exec_control;
++
++#define cpu_has_vmx_virtualize_apic_accesses \
++    (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
+ #define cpu_has_vmx_tpr_shadow \
+     (vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
++#define cpu_has_vmx_mmap_vtpr_optimization \
++    (cpu_has_vmx_virtualize_apic_accesses && cpu_has_vmx_tpr_shadow)
++
+ #define cpu_has_vmx_msr_bitmap \
+     (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
+ extern char *vmx_msr_bitmap;
+@@ -158,6 +167,8 @@ enum vmcs_field {
+     TSC_OFFSET_HIGH                 = 0x00002011,
+     VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
+     VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
++    APIC_ACCESS_ADDR                = 0x00002014,
++    APIC_ACCESS_ADDR_HIGH           = 0x00002015, 
+     VMCS_LINK_POINTER               = 0x00002800,
+     VMCS_LINK_POINTER_HIGH          = 0x00002801,
+     GUEST_IA32_DEBUGCTL             = 0x00002802,
+diff -r 8f147a735faf xen/include/asm-x86/hvm/vmx/vmx.h
+--- a/xen/include/asm-x86/hvm/vmx/vmx.h        Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/include/asm-x86/hvm/vmx/vmx.h        Fri Aug 17 14:47:32 2007 -0400
+@@ -32,6 +32,9 @@ void vmx_intr_assist(void);
+ void vmx_intr_assist(void);
+ void vmx_do_resume(struct vcpu *);
+ void set_guest_time(struct vcpu *v, u64 gtime);
++
++extern struct page_info *change_guest_physmap_for_vtpr(struct domain *d,
++                                                       int enable_vtpr);
+ 
+ /*
+  * Exit Reasons
+@@ -81,6 +84,7 @@ void set_guest_time(struct vcpu *v, u64 
+ #define EXIT_REASON_MACHINE_CHECK       41
+ 
+ #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
++#define EXIT_REASON_APIC_ACCESS         44
+ 
+ /*
+  * Interruption-information format
+diff -r 8f147a735faf xen/include/asm-x86/msr.h
+--- a/xen/include/asm-x86/msr.h        Fri Aug 17 14:47:30 2007 -0400
++++ b/xen/include/asm-x86/msr.h        Fri Aug 17 14:47:32 2007 -0400
+@@ -117,6 +117,7 @@ static inline void wrmsrl(unsigned int m
+ #define MSR_IA32_VMX_CR0_FIXED1                 0x487
+ #define MSR_IA32_VMX_CR4_FIXED0                 0x488
+ #define MSR_IA32_VMX_CR4_FIXED1                 0x489
++#define MSR_IA32_VMX_PROCBASED_CTLS2            0x48b
+ #define IA32_FEATURE_CONTROL_MSR                0x3a
+ #define IA32_FEATURE_CONTROL_MSR_LOCK           0x1
+ #define IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON   0x4
diff -r 666b65529853 -r 12113e3eecad 15473-300d1effb792
--- a/15473-300d1effb792        Tue Sep 11 13:52:59 2007 -0400
+++ b/15473-300d1effb792        Tue Sep 11 13:57:53 2007 -0400
@@ -7,9 +7,9 @@ xen-unstable changeset: 15473:300d1effb7
 xen-unstable changeset: 15473:300d1effb792700ad231a9627443be4158b832a8
 xen-unstable date: Fri Jul 06 14:36:34 2007 +0100
 
-diff -r 6abba6b69e14 xen/arch/x86/hvm/vmx/intr.c
---- a/xen/arch/x86/hvm/vmx/intr.c      Thu Sep 06 13:15:51 2007 +0100
-+++ b/xen/arch/x86/hvm/vmx/intr.c      Thu Sep 06 13:16:45 2007 +0100
+diff -r 3df9bf768ba3 xen/arch/x86/hvm/vmx/intr.c
+--- a/xen/arch/x86/hvm/vmx/intr.c      Mon Sep 10 09:13:38 2007 -0400
++++ b/xen/arch/x86/hvm/vmx/intr.c      Mon Sep 10 09:13:38 2007 -0400
 @@ -73,7 +73,7 @@
  
  static void enable_irq_window(struct vcpu *v)
@@ -19,21 +19,21 @@ diff -r 6abba6b69e14 xen/arch/x86/hvm/vm
      
      if ( !(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) )
      {
-diff -r 6abba6b69e14 xen/arch/x86/hvm/vmx/vmcs.c
---- a/xen/arch/x86/hvm/vmx/vmcs.c      Thu Sep 06 13:15:51 2007 +0100
-+++ b/xen/arch/x86/hvm/vmx/vmcs.c      Thu Sep 06 13:16:33 2007 +0100
-@@ -297,7 +297,7 @@ static void construct_vmcs(struct vcpu *
+diff -r 3df9bf768ba3 xen/arch/x86/hvm/vmx/vmcs.c
+--- a/xen/arch/x86/hvm/vmx/vmcs.c      Mon Sep 10 09:13:38 2007 -0400
++++ b/xen/arch/x86/hvm/vmx/vmcs.c      Mon Sep 10 09:15:23 2007 -0400
+@@ -316,7 +316,7 @@ static void construct_vmcs(struct vcpu *
      __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
      __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
      __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
 -    v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
 +    v->arch.hvm_vmx.exec_control = vmx_cpu_based_exec_control;
+     if ( vmx_cpu_based_exec_control & ACTIVATE_SECONDARY_CONTROLS )
+         __vmwrite(SECONDARY_VM_EXEC_CONTROL, vmx_secondary_exec_control);
  
-     if ( cpu_has_vmx_msr_bitmap )
-         __vmwrite(MSR_BITMAP, virt_to_maddr(vmx_msr_bitmap));
-diff -r 6abba6b69e14 xen/arch/x86/hvm/vmx/vmx.c
---- a/xen/arch/x86/hvm/vmx/vmx.c       Thu Sep 06 13:15:51 2007 +0100
-+++ b/xen/arch/x86/hvm/vmx/vmx.c       Thu Sep 06 13:15:51 2007 +0100
+diff -r 3df9bf768ba3 xen/arch/x86/hvm/vmx/vmx.c
+--- a/xen/arch/x86/hvm/vmx/vmx.c       Mon Sep 10 09:13:38 2007 -0400
++++ b/xen/arch/x86/hvm/vmx/vmx.c       Mon Sep 10 09:13:38 2007 -0400
 @@ -417,8 +417,8 @@ static inline void vmx_save_dr(struct vc
  
      /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
@@ -57,7 +57,7 @@ diff -r 6abba6b69e14 xen/arch/x86/hvm/vm
  }
  
  /*
-@@ -2858,15 +2858,15 @@ asmlinkage void vmx_vmexit_handler(struc
+@@ -2967,15 +2967,15 @@ asmlinkage void vmx_vmexit_handler(struc
          break;
      case EXIT_REASON_PENDING_VIRT_INTR:
          /* Disable the interrupt window. */
diff -r 666b65529853 -r 12113e3eecad series
--- a/series    Tue Sep 11 13:52:59 2007 -0400
+++ b/series    Tue Sep 11 13:57:53 2007 -0400
@@ -79,6 +79,7 @@ 15178-1bad5a932df5
 15178-1bad5a932df5
 15179-152dc0d812b2
 15183-63211a8027fa
+15185-1f8fb764f843
 15188-ae073ca6eb76
 15189-2d7d33ac982a
 15190-c9d66baad22b
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 

