
[Xen-devel] [PATCH v3 16/17] x86: do page table switching when entering/leaving hypervisor



For XPTI-enabled domains, switch page tables when entering or leaving
the hypervisor. This requires storing both %cr3 values in the per-vcpu
stack regions and adding the switching code to the macros used to
switch stacks.

The hypervisor will run on the original L4 page table supplied by the
guest, while the guest will use the shadow.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/pv/xpti.c            | 17 ++++++++++++-----
 xen/arch/x86/traps.c              |  3 ++-
 xen/arch/x86/x86_64/asm-offsets.c |  2 ++
 xen/include/asm-x86/asm_defns.h   | 17 ++++++++++++++++-
 xen/include/asm-x86/current.h     |  4 +++-
 5 files changed, 35 insertions(+), 8 deletions(-)
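
Roughly, in C terms, the switching added to the stack macros below amounts
to the following sketch. It is for illustration only: read_cr4(), write_cr4()
and write_cr3() stand in for the raw mov instructions in the assembler
macros, and the ON_VCPUSTACK test mirrors the testb done there.

    /* Illustration only - the real switching is in asm_defns.h below. */
    static void switch_to_xen_page_tables(struct cpu_info *ci)
    {
        /* Entering the hypervisor: use the original L4 supplied by the guest. */
        if ( ci->flags & ON_VCPUSTACK )
            write_cr3(ci->xen_cr3);
    }

    static void switch_to_guest_page_tables(struct cpu_info *ci)
    {
        /* Leaving the hypervisor: switch the guest to its shadow L4. */
        if ( ci->flags & ON_VCPUSTACK )
        {
            unsigned long cr4 = read_cr4();

            write_cr4(cr4 & ~X86_CR4_PGE);  /* clearing PGE flushes global TLB entries */
            write_cr3(ci->guest_cr3);
            write_cr4(cr4);                 /* restore CR4.PGE */
        }
    }

Toggling CR4.PGE around the %cr3 write makes the return path effectively a
full TLB flush, so no stale hypervisor translations remain visible while
running on the shadow L4.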

diff --git a/xen/arch/x86/pv/xpti.c b/xen/arch/x86/pv/xpti.c
index da83339563..e08aa782bf 100644
--- a/xen/arch/x86/pv/xpti.c
+++ b/xen/arch/x86/pv/xpti.c
@@ -441,19 +441,26 @@ void xpti_update_l4(const struct domain *d, unsigned long mfn,
 void xpti_make_cr3(struct vcpu *v, unsigned long mfn)
 {
     struct xpti_domain *xd = v->domain->arch.pv_domain.xpti;
+    struct cpu_info *cpu_info;
     unsigned long flags;
-    unsigned int idx;
+    unsigned int old, new;
+
+    cpu_info = (struct cpu_info *)v->arch.pv_vcpu.stack_regs;
 
     spin_lock_irqsave(&xd->lock, flags);
 
-    idx = v->arch.pv_vcpu.xen_cr3_shadow;
+    old = v->arch.pv_vcpu.xen_cr3_shadow;
 
     /* First activate new shadow. */
-    v->arch.pv_vcpu.xen_cr3_shadow = xpti_shadow_activate(xd, mfn);
+    new = xpti_shadow_activate(xd, mfn);
+    v->arch.pv_vcpu.xen_cr3_shadow = new;
 
     /* Deactivate old shadow if applicable. */
-    if ( idx != L4_INVALID )
-        xpti_shadow_deactivate(xd, idx);
+    if ( old != L4_INVALID )
+        xpti_shadow_deactivate(xd, old);
+
+    cpu_info->xen_cr3 = mfn << PAGE_SHIFT;
+    cpu_info->guest_cr3 = xd->l4pg[new].xen_mfn << PAGE_SHIFT;
 
     spin_unlock_irqrestore(&xd->lock, flags);
 }
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 9b29014e2c..93b228dced 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -305,9 +305,10 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs)
     if ( v != current )
     {
         struct vcpu *vcpu;
+        unsigned long cr3 = read_cr3();
 
         ASSERT(guest_kernel_mode(v, regs));
-        vcpu = maddr_get_owner(read_cr3()) == v->domain ? v : NULL;
+        vcpu = maddr_get_owner(cr3) == v->domain ? v : NULL;
         if ( !vcpu )
         {
             stack = do_page_walk(v, (unsigned long)stack);
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index b0060be261..2855feafa3 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -141,6 +141,8 @@ void __dummy__(void)
     OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
     OFFSET(CPUINFO_use_shadow_spec_ctrl, struct cpu_info, use_shadow_spec_ctrl);
     OFFSET(CPUINFO_bti_ist_info, struct cpu_info, bti_ist_info);
+    OFFSET(CPUINFO_guest_cr3, struct cpu_info, guest_cr3);
+    OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
     OFFSET(CPUINFO_stack_bottom_cpu, struct cpu_info, stack_bottom_cpu);
     OFFSET(CPUINFO_flags, struct cpu_info, flags);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index f626cc6134..f69d1501fb 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -141,6 +141,8 @@ void ret_from_intr(void);
         GET_STACK_END(ax);                                               \
         testb $ON_VCPUSTACK, STACK_CPUINFO_FIELD(flags)(%rax);           \
         jz    1f;                                                        \
+        movq  STACK_CPUINFO_FIELD(xen_cr3)(%rax), %rcx;                  \
+        mov   %rcx, %cr3;                                                \
         movq  STACK_CPUINFO_FIELD(stack_bottom_cpu)(%rax), %rsp;         \
 1:
 
@@ -148,12 +150,25 @@ void ret_from_intr(void);
         GET_STACK_END(ax);                                               \
         testb $ON_VCPUSTACK, STACK_CPUINFO_FIELD(flags)(%rax);           \
         jz    1f;                                                        \
+        movq  STACK_CPUINFO_FIELD(xen_cr3)(%rax), %rcx;                  \
+        mov   %rcx, %cr3;                                                \
         sub   $(STACK_SIZE - 1 - ist * PAGE_SIZE), %rax;                 \
         mov   %rax, %rsp;                                                \
 1:
 
 #define SWITCH_TO_VCPU_STACK                                             \
-        mov   %r12, %rsp
+        mov   %r12, %rsp;                                                \
+        GET_STACK_END(ax);                                               \
+        testb $ON_VCPUSTACK, STACK_CPUINFO_FIELD(flags)(%rax);           \
+        jz    1f;                                                        \
+        mov   %cr4, %r8;                                                 \
+        mov   %r8, %r9;                                                  \
+        and   $~X86_CR4_PGE, %r8;                                        \
+        mov   %r8, %cr4;                                                 \
+        movq  STACK_CPUINFO_FIELD(guest_cr3)(%rax), %rcx;                \
+        mov   %rcx, %cr3;                                                \
+        mov   %r9, %cr4;                                                 \
+1:
 
 #ifndef NDEBUG
 #define ASSERT_NOT_IN_ATOMIC                                             \
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index e128c13a1e..82d76a3746 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -67,7 +67,9 @@ struct cpu_info {
         };
         /* per vcpu mapping (xpti) */
         struct {
-            unsigned long v_pad[4];
+            unsigned long v_pad[2];
+            unsigned long guest_cr3;
+            unsigned long xen_cr3;
             unsigned long stack_bottom_cpu;
         };
     };
-- 
2.13.6

