
[xen master] x86/pv: Move stack_switch()/set_segment_base() into PV-only files



commit a4c4b288069cf0c7285ec3d3efb1df0b46051d46
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Sep 3 19:09:45 2020 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Sep 9 16:53:28 2020 +0100

    x86/pv: Move stack_switch()/set_segment_base() into PV-only files
    
    So they are excluded from !CONFIG_PV builds.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/pv/misc-hypercalls.c | 101 ++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/x86_64/mm.c          | 101 --------------------------------------
 2 files changed, 101 insertions(+), 101 deletions(-)
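
As the commit message says, the point of the move is that xen/arch/x86/pv/ is
only compiled into CONFIG_PV=y hypervisors, so the two handlers disappear
entirely from !CONFIG_PV builds.  Below is a minimal standalone C sketch of
that kind of compile-time exclusion, using a plain -DCONFIG_PV compiler switch
in place of Kconfig (the demo_* names and the -ENOSYS fallback are invented
for the demo and are not Xen code):

    /* Sketch only: models how a config option removes a handler from the
     * build.  Compile with and without -DCONFIG_PV to see both variants. */
    #include <errno.h>
    #include <stdio.h>

    #ifdef CONFIG_PV
    /* Only compiled into PV-capable builds, like the file added below. */
    static long demo_do_stack_switch(unsigned long ss, unsigned long esp)
    {
        printf("PV build: guest kernel stack recorded as %#lx:%#lx\n", ss, esp);
        return 0;
    }
    #endif

    static long demo_dispatch(unsigned long ss, unsigned long esp)
    {
    #ifdef CONFIG_PV
        return demo_do_stack_switch(ss, esp);
    #else
        (void)ss; (void)esp;
        return -ENOSYS;   /* this fallback is for the demo only */
    #endif
    }

    int main(void)
    {
        printf("stack_switch -> %ld\n", demo_dispatch(0x10, 0xffffc000));
        return 0;
    }

Built with "gcc -DCONFIG_PV demo.c" the PV path exists; without the define the
call compiles down to the stub, which mirrors how the moved functions are
simply absent from a !CONFIG_PV hypervisor.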

diff --git a/xen/arch/x86/pv/misc-hypercalls.c b/xen/arch/x86/pv/misc-hypercalls.c
index 136fa10c96..b353972e3d 100644
--- a/xen/arch/x86/pv/misc-hypercalls.c
+++ b/xen/arch/x86/pv/misc-hypercalls.c
@@ -171,6 +171,107 @@ long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value)
     return 0;
 }
 
+long do_stack_switch(unsigned long ss, unsigned long esp)
+{
+    fixup_guest_stack_selector(current->domain, ss);
+    current->arch.pv.kernel_ss = ss;
+    current->arch.pv.kernel_sp = esp;
+
+    return 0;
+}
+
+long do_set_segment_base(unsigned int which, unsigned long base)
+{
+    struct vcpu *v = current;
+    long ret = 0;
+
+    if ( is_pv_32bit_vcpu(v) )
+        return -ENOSYS; /* x86/64 only. */
+
+    switch ( which )
+    {
+    case SEGBASE_FS:
+        if ( is_canonical_address(base) )
+            wrfsbase(base);
+        else
+            ret = -EINVAL;
+        break;
+
+    case SEGBASE_GS_USER:
+        if ( is_canonical_address(base) )
+        {
+            wrgsshadow(base);
+            v->arch.pv.gs_base_user = base;
+        }
+        else
+            ret = -EINVAL;
+        break;
+
+    case SEGBASE_GS_KERNEL:
+        if ( is_canonical_address(base) )
+            wrgsbase(base);
+        else
+            ret = -EINVAL;
+        break;
+
+    case SEGBASE_GS_USER_SEL:
+    {
+        unsigned int sel = (uint16_t)base;
+
+        /*
+         * We wish to update the user %gs from the GDT/LDT.  Currently, the
+         * guest kernel's GS_BASE is in context.
+         */
+        asm volatile ( "swapgs" );
+
+        if ( sel > 3 )
+            /* Fix up RPL for non-NUL selectors. */
+            sel |= 3;
+        else if ( boot_cpu_data.x86_vendor &
+                  (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+            /* Work around NUL segment behaviour on AMD hardware. */
+            asm volatile ( "mov %[sel], %%gs"
+                           :: [sel] "r" (FLAT_USER_DS32) );
+
+        /*
+         * Load the chosen selector, with fault handling.
+         *
+         * Errors ought to fail the hypercall, but that was never built in
+         * originally, and Linux will BUG() if this call fails.
+         *
+         * NUL the selector in the case of an error.  This too needs to deal
+         * with the AMD NUL segment behaviour, but it is already a slowpath in
+         * #GP context so perform the flat load unconditionally to avoid
+         * complicated logic.
+         *
+         * Anyone wanting to check for errors from this hypercall should
+         * re-read %gs and compare against the input.
+         */
+        asm volatile ( "1: mov %[sel], %%gs\n\t"
+                       ".section .fixup, \"ax\", @progbits\n\t"
+                       "2: mov %k[flat], %%gs\n\t"
+                       "   xor %[sel], %[sel]\n\t"
+                       "   jmp 1b\n\t"
+                       ".previous\n\t"
+                       _ASM_EXTABLE(1b, 2b)
+                       : [sel] "+r" (sel)
+                       : [flat] "r" (FLAT_USER_DS32) );
+
+        /* Update the cache of the inactive base, as read from the GDT/LDT. */
+        v->arch.pv.gs_base_user = rdgsbase();
+
+        asm volatile ( safe_swapgs );
+        break;
+    }
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index b69cf2dc4f..98581dfe5f 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1010,107 +1010,6 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     return rc;
 }
 
-long do_stack_switch(unsigned long ss, unsigned long esp)
-{
-    fixup_guest_stack_selector(current->domain, ss);
-    current->arch.pv.kernel_ss = ss;
-    current->arch.pv.kernel_sp = esp;
-    return 0;
-}
-
-long do_set_segment_base(unsigned int which, unsigned long base)
-{
-    struct vcpu *v = current;
-    long ret = 0;
-
-    if ( is_pv_32bit_vcpu(v) )
-        return -ENOSYS; /* x86/64 only. */
-
-    switch ( which )
-    {
-    case SEGBASE_FS:
-        if ( is_canonical_address(base) )
-            wrfsbase(base);
-        else
-            ret = -EINVAL;
-        break;
-
-    case SEGBASE_GS_USER:
-        if ( is_canonical_address(base) )
-        {
-            wrgsshadow(base);
-            v->arch.pv.gs_base_user = base;
-        }
-        else
-            ret = -EINVAL;
-        break;
-
-    case SEGBASE_GS_KERNEL:
-        if ( is_canonical_address(base) )
-            wrgsbase(base);
-        else
-            ret = -EINVAL;
-        break;
-
-    case SEGBASE_GS_USER_SEL:
-    {
-        unsigned int sel = (uint16_t)base;
-
-        /*
-         * We wish to update the user %gs from the GDT/LDT.  Currently, the
-         * guest kernel's GS_BASE is in context.
-         */
-        asm volatile ( "swapgs" );
-
-        if ( sel > 3 )
-            /* Fix up RPL for non-NUL selectors. */
-            sel |= 3;
-        else if ( boot_cpu_data.x86_vendor &
-                  (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
-            /* Work around NUL segment behaviour on AMD hardware. */
-            asm volatile ( "mov %[sel], %%gs"
-                           :: [sel] "r" (FLAT_USER_DS32) );
-
-        /*
-         * Load the chosen selector, with fault handling.
-         *
-         * Errors ought to fail the hypercall, but that was never built in
-         * originally, and Linux will BUG() if this call fails.
-         *
-         * NUL the selector in the case of an error.  This too needs to deal
-         * with the AMD NUL segment behaviour, but it is already a slowpath in
-         * #GP context so perform the flat load unconditionally to avoid
-         * complicated logic.
-         *
-         * Anyone wanting to check for errors from this hypercall should
-         * re-read %gs and compare against the input.
-         */
-        asm volatile ( "1: mov %[sel], %%gs\n\t"
-                       ".section .fixup, \"ax\", @progbits\n\t"
-                       "2: mov %k[flat], %%gs\n\t"
-                       "   xor %[sel], %[sel]\n\t"
-                       "   jmp 1b\n\t"
-                       ".previous\n\t"
-                       _ASM_EXTABLE(1b, 2b)
-                       : [sel] "+r" (sel)
-                       : [flat] "r" (FLAT_USER_DS32) );
-
-        /* Update the cache of the inactive base, as read from the GDT/LDT. */
-        v->arch.pv.gs_base_user = rdgsbase();
-
-        asm volatile ( safe_swapgs );
-        break;
-    }
-
-    default:
-        ret = -EINVAL;
-        break;
-    }
-
-    return ret;
-}
-
-
 /* Returns TRUE if given descriptor is valid for GDT or LDT. */
 int check_descriptor(const struct domain *dom, seg_desc_t *d)
 {
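
One detail worth noting in the SEGBASE_FS/SEGBASE_GS_USER/SEGBASE_GS_KERNEL
cases above: a base is only written if it is canonical, otherwise the
hypercall fails with -EINVAL.  Here is a small standalone sketch of a 48-bit
canonicalness test, written as the usual sign-extension comparison
(demo_is_canonical() is a name made up for this sketch, not Xen's helper):

    /* Sketch only: "canonical" on 48-bit x86-64 virtual addressing means
     * bits 63..47 are all copies of bit 47.  Shift the address up by 16,
     * sign-extend it back down, and check that nothing changed. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool demo_is_canonical(unsigned long addr)
    {
        return (long)addr == ((long)(addr << 16)) >> 16;
    }

    int main(void)
    {
        unsigned long ok  = 0x00007ffffffff000UL;  /* inside the lower canonical range */
        unsigned long bad = 0x0000800000000000UL;  /* first non-canonical address */

        printf("%#lx -> %s\n", ok,  demo_is_canonical(ok)  ? "accepted" : "-EINVAL");
        printf("%#lx -> %s\n", bad, demo_is_canonical(bad) ? "accepted" : "-EINVAL");
        return 0;
    }

On x86-64 this prints "accepted" for the first address and "-EINVAL" for the
second, which is what a guest passing a junk base gets back from the
hypercall.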
--
generated by git-patchbot for /home/xen/git/xen.git#master



 

