
[Xen-changelog] [xen-unstable] x86/hvm: Add get_shadow_gs_base() wrapper function


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Mon, 14 May 2012 16:32:09 +0000
  • Delivery-date: Mon, 14 May 2012 16:32:13 +0000
  • List-id: "Change log for Mercurial (receive only)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>
# Date 1335542040 -7200
# Node ID fd04ba0aa4fa6e06b58f9c3ed4e1cd3830459bc6
# Parent  107285938c50f82667bd4d014820b439a077c22c
x86/hvm: Add get_shadow_gs_base() wrapper function

Add a wrapper function to the HVM function table that returns the
shadow GS base.

Signed-off-by: Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---


diff -r 107285938c50 -r fd04ba0aa4fa xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Apr 26 10:03:08 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Apr 27 17:54:00 2012 +0200
@@ -645,6 +645,11 @@ static void svm_set_segment_register(str
         svm_vmload(vmcb);
 }
 
+static unsigned long svm_get_shadow_gs_base(struct vcpu *v)
+{
+    return v->arch.hvm_svm.vmcb->kerngsbase;
+}
+
 static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1990,6 +1995,7 @@ static struct hvm_function_table __read_
     .guest_x86_mode       = svm_guest_x86_mode,
     .get_segment_register = svm_get_segment_register,
     .set_segment_register = svm_set_segment_register,
+    .get_shadow_gs_base   = svm_get_shadow_gs_base,
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr      = svm_update_guest_cr,
     .update_guest_efer    = svm_update_guest_efer,
diff -r 107285938c50 -r fd04ba0aa4fa xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Apr 26 10:03:08 2012 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Apr 27 17:54:00 2012 +0200
@@ -942,6 +942,15 @@ static void vmx_set_segment_register(str
     vmx_vmcs_exit(v);
 }
 
+static unsigned long vmx_get_shadow_gs_base(struct vcpu *v)
+{
+#ifdef __x86_64__
+    return v->arch.hvm_vmx.shadow_gs;
+#else
+    return 0;
+#endif
+}
+
 static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     if ( !cpu_has_vmx_pat || !paging_mode_hap(v->domain) )
@@ -1522,6 +1531,7 @@ static struct hvm_function_table __read_
     .guest_x86_mode       = vmx_guest_x86_mode,
     .get_segment_register = vmx_get_segment_register,
     .set_segment_register = vmx_set_segment_register,
+    .get_shadow_gs_base   = vmx_get_shadow_gs_base,
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
     .update_guest_efer    = vmx_update_guest_efer,
diff -r 107285938c50 -r fd04ba0aa4fa xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Thu Apr 26 10:03:08 2012 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Fri Apr 27 17:54:00 2012 +0200
@@ -106,6 +106,7 @@ struct hvm_function_table {
                                  struct segment_register *reg);
     void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
+    unsigned long (*get_shadow_gs_base)(struct vcpu *v);
 
     /* 
      * Re-set the value of CR3 that Xen runs on when handling VM exits.
@@ -305,6 +306,11 @@ hvm_set_segment_register(struct vcpu *v,
     hvm_funcs.set_segment_register(v, seg, reg);
 }
 
+static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
+{
+    return hvm_funcs.get_shadow_gs_base(v);
+}
+
 #define is_viridian_domain(_d)                                             \
  (is_hvm_domain(_d) && ((_d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN]))
 

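For context, a minimal usage sketch follows. It is hypothetical and not part of the changeset above: dump_guest_gs_bases() is an invented helper, shown only to illustrate where common HVM code would call the new hvm_get_shadow_gs_base() wrapper instead of touching the SVM VMCB or VMX state directly.

/* Hypothetical example -- not part of the patch above. */
#include <xen/lib.h>        /* printk() */
#include <xen/sched.h>      /* struct domain, struct vcpu, for_each_vcpu() */
#include <asm/hvm/hvm.h>    /* hvm_funcs, hvm_get_shadow_gs_base() */

static void dump_guest_gs_bases(struct domain *d)
{
    struct vcpu *v;

    /* The hvm_funcs table is only populated for HVM guests. */
    if ( !is_hvm_domain(d) )
        return;

    for_each_vcpu ( d, v )
        /*
         * The wrapper dispatches to svm_get_shadow_gs_base() or
         * vmx_get_shadow_gs_base() and returns the guest's inactive
         * (kernel) GS base -- the value SWAPGS exchanges with the
         * active GS base.
         */
        printk("d%d v%d: shadow GS base %lx\n",
               d->domain_id, v->vcpu_id, hvm_get_shadow_gs_base(v));
}

The design point the patch makes is the same one this sketch relies on: callers see a single accessor in struct hvm_function_table, and the SVM/VMX differences (vmcb->kerngsbase versus the cached shadow_gs field, which only exists on x86_64) stay inside the vendor-specific files.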