
[Xen-changelog] [xen-unstable] x86, vmx: Enable VPID (Virtual Processor Identification)



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1207748089 -3600
# Node ID 0553004fa328b86756a428410d21394fe62bd29a
# Parent  3a213b0e1ac01b7c13b8da10919f1692219cdfb5
x86, vmx: Enable VPID (Virtual Processor Identification)

This allows TLB entries to be retained across VM entry and VM exit,
since Xen can now tag distinct address spaces with a new
virtual-processor ID (VPID) field in the VMCS.
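
The hunks below carve the 16-bit VPID space into per-domain blocks:
vmx_alloc_vpid() reserves a block index in a global bitmap and records
vpid_base = idx * MAX_VIRT_CPUS, and construct_vmcs() then tags each
vCPU with vpid_base + vcpu_id (VPID 0 stays reserved for the
hypervisor).  A minimal sketch of that arithmetic, using an assumed
MAX_VIRT_CPUS value purely for the example numbers:

    /* Sketch only: how a vCPU's VPID tag is derived under this scheme.
     * EXAMPLE_MAX_VIRT_CPUS is an assumed value, not taken from this patch. */
    #define EXAMPLE_MAX_VIRT_CPUS 128

    static unsigned int example_vpid(unsigned int bitmap_idx, unsigned int vcpu_id)
    {
        unsigned int vpid_base = bitmap_idx * EXAMPLE_MAX_VIRT_CPUS;
        return vpid_base + vcpu_id;  /* e.g. idx 3, vcpu 2 -> VPID 386 */
    }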

Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
Signed-off-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Signed-off-by: Xiaohui Xin <Xiaohui.xin@xxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c        |   17 ++++++++
 xen/arch/x86/hvm/vmx/vmx.c         |   74 +++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/hvm/vmx/vmcs.h |   10 ++++-
 xen/include/asm-x86/hvm/vmx/vmx.h  |   44 ++++++++++++++++++----
 4 files changed, 133 insertions(+), 12 deletions(-)

diff -r 3a213b0e1ac0 -r 0553004fa328 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Apr 09 14:10:32 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Wed Apr 09 14:34:49 2008 +0100
@@ -38,6 +38,9 @@
 #include <asm/shadow.h>
 #include <asm/tboot.h>
 
+static int opt_vpid_enabled = 1;
+boolean_param("vpid", opt_vpid_enabled);
+
 /* Dynamic (run-time adjusted) execution control flags. */
 u32 vmx_pin_based_exec_control __read_mostly;
 u32 vmx_cpu_based_exec_control __read_mostly;
@@ -111,6 +114,8 @@ static void vmx_init_vmcs_config(void)
         opt = (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                SECONDARY_EXEC_WBINVD_EXITING |
                SECONDARY_EXEC_ENABLE_EPT);
+        if ( opt_vpid_enabled )
+            opt |= SECONDARY_EXEC_ENABLE_VPID;
         _vmx_secondary_exec_control = adjust_vmx_controls(
             min, opt, MSR_IA32_VMX_PROCBASED_CTLS2);
     }
@@ -317,6 +322,8 @@ int vmx_cpu_up(void)
 
     ept_sync_all();
 
+    vpid_sync_all();
+
     return 1;
 }
 
@@ -627,6 +634,13 @@ static int construct_vmcs(struct vcpu *v
         __vmwrite(EPT_POINTER_HIGH,
                   d->arch.hvm_domain.vmx.ept_control.eptp >> 32);
 #endif
+    }
+
+    if ( cpu_has_vmx_vpid )
+    {
+        v->arch.hvm_vmx.vpid =
+            v->domain->arch.hvm_domain.vmx.vpid_base + v->vcpu_id;
+        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vmx.vpid);
     }
 
     vmx_vmcs_exit(v);
@@ -822,6 +836,7 @@ void vmx_do_resume(struct vcpu *v)
         vmx_load_vmcs(v);
         hvm_migrate_timers(v);
         vmx_set_host_env(v);
+        vpid_sync_vcpu_all(v);
     }
 
     debug_state = v->domain->debugger_attached;
@@ -976,6 +991,8 @@ void vmcs_dump_vcpu(struct vcpu *v)
            (uint32_t)vmr(TPR_THRESHOLD));
     printk("EPT pointer = 0x%08x%08x\n",
            (uint32_t)vmr(EPT_POINTER_HIGH), (uint32_t)vmr(EPT_POINTER));
+    printk("Virtual processor ID = 0x%04x\n",
+           (uint32_t)vmr(VIRTUAL_PROCESSOR_ID));
 
     vmx_vmcs_exit(v);
 }
diff -r 3a213b0e1ac0 -r 0553004fa328 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Apr 09 14:10:32 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Apr 09 14:34:49 2008 +0100
@@ -57,6 +57,8 @@ static void vmx_ctxt_switch_to(struct vc
 
 static int  vmx_alloc_vlapic_mapping(struct domain *d);
 static void vmx_free_vlapic_mapping(struct domain *d);
+static int  vmx_alloc_vpid(struct domain *d);
+static void vmx_free_vpid(struct domain *d);
 static void vmx_install_vlapic_mapping(struct vcpu *v);
 static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
 static void vmx_update_guest_efer(struct vcpu *v);
@@ -71,18 +73,30 @@ static void vmx_invlpg_intercept(unsigne
 
 static int vmx_domain_initialise(struct domain *d)
 {
+    int rc;
+
     d->arch.hvm_domain.vmx.ept_control.etmt = EPT_DEFAULT_MT;
     d->arch.hvm_domain.vmx.ept_control.gaw  = EPT_DEFAULT_GAW;
     d->arch.hvm_domain.vmx.ept_control.asr  =
         pagetable_get_pfn(d->arch.phys_table);
 
-    return vmx_alloc_vlapic_mapping(d);
+    if ( (rc = vmx_alloc_vpid(d)) != 0 )
+        return rc;
+
+    if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
+    {
+        vmx_free_vpid(d);
+        return rc;
+    }
+
+    return 0;
 }
 
 static void vmx_domain_destroy(struct domain *d)
 {
     ept_sync_domain(d);
     vmx_free_vlapic_mapping(d);
+    vmx_free_vpid(d);
 }
 
 static int vmx_vcpu_initialise(struct vcpu *v)
@@ -1024,6 +1038,7 @@ static void vmx_update_guest_cr(struct v
         }
  
         __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
+        vpid_sync_vcpu_all(v);
         break;
     case 4:
         v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
@@ -1069,9 +1084,15 @@ static void vmx_update_guest_efer(struct
 
 static void vmx_flush_guest_tlbs(void)
 {
-    /* No tagged TLB support on VMX yet.  The fact that we're in Xen
-     * at all means any guest will have a clean TLB when it's next run,
-     * because VMRESUME will flush it for us. */
+    /*
+     * If VPID (i.e. tagged TLB support) is not enabled, the fact that
+     * we're in Xen at all means any guest will have a clean TLB when
+     * it's next run, because VMRESUME will flush it for us.
+     *
+     * If enabled, we invalidate all translations associated with all
+     * VPID values.
+     */
+    vpid_sync_all();
 }
 
 static void __ept_sync_domain(void *info)
@@ -1202,6 +1223,9 @@ static struct hvm_function_table vmx_fun
     .invlpg_intercept     = vmx_invlpg_intercept
 };
 
+static unsigned long *vpid_bitmap;
+#define VPID_BITMAP_SIZE ((1u << VMCS_VPID_WIDTH) / MAX_VIRT_CPUS)
+
 void start_vmx(void)
 {
     static int bootstrapped;
@@ -1239,6 +1263,19 @@ void start_vmx(void)
     {
         printk("VMX: EPT is available.\n");
         vmx_function_table.hap_supported = 1;
+    }
+
+    if ( cpu_has_vmx_vpid )
+    {
+        printk("VMX: VPID is available.\n");
+
+        vpid_bitmap = xmalloc_array(
+            unsigned long, BITS_TO_LONGS(VPID_BITMAP_SIZE));
+        BUG_ON(vpid_bitmap == NULL);
+        memset(vpid_bitmap, 0, BITS_TO_LONGS(VPID_BITMAP_SIZE) * sizeof(long));
+
+        /* VPID 0 is used by VMX root mode (the hypervisor). */
+        __set_bit(0, vpid_bitmap);
     }
 
     setup_vmcs_dump();
@@ -1755,6 +1792,35 @@ static void vmx_free_vlapic_mapping(stru
         free_xenheap_page(mfn_to_virt(mfn));
 }
 
+static int vmx_alloc_vpid(struct domain *d)
+{
+    int idx;
+
+    if ( !cpu_has_vmx_vpid )
+        return 0;
+
+    do {
+        idx = find_first_zero_bit(vpid_bitmap, VPID_BITMAP_SIZE);
+        if ( idx >= VPID_BITMAP_SIZE )
+        {
+            dprintk(XENLOG_WARNING, "VMX VPID space exhausted.\n");
+            return -EBUSY;
+        }
+    }
+    while ( test_and_set_bit(idx, vpid_bitmap) );
+
+    d->arch.hvm_domain.vmx.vpid_base = idx * MAX_VIRT_CPUS;
+    return 0;
+}
+
+static void vmx_free_vpid(struct domain *d)
+{
+    if ( !cpu_has_vmx_vpid )
+        return;
+
+    clear_bit(d->arch.hvm_domain.vmx.vpid_base / MAX_VIRT_CPUS, vpid_bitmap);
+}
+
 static void vmx_install_vlapic_mapping(struct vcpu *v)
 {
     paddr_t virt_page_ma, apic_page_ma;
diff -r 3a213b0e1ac0 -r 0553004fa328 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Apr 09 14:10:32 2008 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Apr 09 14:34:49 2008 +0100
@@ -58,7 +58,7 @@ struct vmx_msr_state {
 
 struct vmx_domain {
     unsigned long apic_access_mfn;
-
+    unsigned long vpid_base;
     union {
         struct {
             u64 etmt :3,
@@ -89,6 +89,8 @@ struct arch_vmx_struct {
     /* Cache of cpu execution control. */
     u32                  exec_control;
     u32                  secondary_exec_control;
+
+    u16                  vpid;
 
     /* PMU */
     struct vpmu_struct   vpmu;
@@ -157,6 +159,7 @@ extern u32 vmx_vmentry_control;
 
 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
 #define SECONDARY_EXEC_ENABLE_EPT               0x00000002
+#define SECONDARY_EXEC_ENABLE_VPID              0x00000020
 #define SECONDARY_EXEC_WBINVD_EXITING           0x00000040
 extern u32 vmx_secondary_exec_control;
 
@@ -176,6 +179,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr
     (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
 #define cpu_has_vmx_ept \
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)
+#define cpu_has_vmx_vpid \
+    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define VMX_INTR_SHADOW_STI             0x00000001
@@ -185,6 +190,7 @@ extern bool_t cpu_has_vmx_ins_outs_instr
 
 /* VMCS field encodings. */
 enum vmcs_field {
+    VIRTUAL_PROCESSOR_ID            = 0x00000000,
     GUEST_ES_SELECTOR               = 0x00000800,
     GUEST_CS_SELECTOR               = 0x00000802,
     GUEST_SS_SELECTOR               = 0x00000804,
@@ -324,6 +330,8 @@ enum vmcs_field {
     HOST_RIP                        = 0x00006c16,
 };
 
+#define VMCS_VPID_WIDTH 16
+
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr);
 int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val);
 int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val);
diff -r 3a213b0e1ac0 -r 0553004fa328 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed Apr 09 14:10:32 2008 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed Apr 09 14:34:49 2008 +0100
@@ -164,6 +164,7 @@ void vmx_realmode(struct cpu_user_regs *
 #define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
 #define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
 #define INVEPT_OPCODE   ".byte 0x66,0x0f,0x38,0x80\n"   /* m128,r64/32 */
+#define INVVPID_OPCODE  ".byte 0x66,0x0f,0x38,0x81\n"   /* m128,r64/32 */
 #define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
 #define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"
 
@@ -260,13 +261,30 @@ static inline void __invept(int ext, u64
         u64 eptp, gpa;
     } operand = {eptp, gpa};
 
-    __asm__ __volatile__ ( INVEPT_OPCODE
-                           MODRM_EAX_08
-                           /* CF==1 or ZF==1 --> rc = -1 */
-                           "ja 1f ; ud2 ; 1:\n"
-                           :
-                           : "a" (&operand), "c" (ext)
-                           : "memory");
+    asm volatile ( INVEPT_OPCODE
+                   MODRM_EAX_08
+                   /* CF==1 or ZF==1 --> rc = -1 */
+                   "ja 1f ; ud2 ; 1:\n"
+                   :
+                   : "a" (&operand), "c" (ext)
+                   : "memory" );
+}
+
+static inline void __invvpid(int ext, u16 vpid, u64 gva)
+{
+    struct {
+        u64 vpid:16;
+        u64 rsvd:48;
+        u64 gva;
+    } __attribute__ ((packed)) operand = {vpid, 0, gva};
+
+    asm volatile ( INVVPID_OPCODE
+                   MODRM_EAX_08
+                   /* CF==1 or ZF==1 --> rc = -1 */
+                   "ja 1f ; ud2 ; 1:\n"
+                   :
+                   : "a" (&operand), "c" (ext)
+                   : "memory" );
 }
 
 static inline void ept_sync_all(void)
@@ -278,6 +296,18 @@ static inline void ept_sync_all(void)
 }
 
 void ept_sync_domain(struct domain *d);
+
+static inline void vpid_sync_vcpu_all(struct vcpu *v)
+{
+    if ( cpu_has_vmx_vpid )
+        __invvpid(1, v->arch.hvm_vmx.vpid, 0);
+}
+
+static inline void vpid_sync_all(void)
+{
+    if ( cpu_has_vmx_vpid )
+        __invvpid(2, 0, 0);
+}
 
 static inline void __vmxoff(void)
 {
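
Two usage notes on the code above.  The __invvpid() wrapper is issued
with type 1 (single-context) from vpid_sync_vcpu_all(), flushing only
the translations tagged with that vCPU's VPID, and with type 2
(all-context) from vpid_sync_all(), flushing translations for every
VPID.  And because opt_vpid_enabled defaults to 1 and is registered via
boolean_param("vpid", ...), VPID use can be switched off on the Xen
command line; a hedged GRUB-entry sketch (path and other options are
illustrative, not from this patch):

    # Boot Xen with VPID disabled even on capable hardware.
    # "vpid=0" (or "no-vpid") is the usual boolean_param spelling.
    kernel /boot/xen.gz vpid=0 console=vga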

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog