
[Xen-changelog] [xen master] x86/shadow: remove a few 32-bit hypervisor leftovers



commit 1a57a83ef71ba1bf81fe97e35ca302e6a7949649
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Feb 9 13:24:23 2016 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Feb 9 13:24:23 2016 +0100

    x86/shadow: remove a few 32-bit hypervisor leftovers
    
    ... related to 8-byte cmpxchg having required special precautions
    there.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
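
    (Illustrative note, not part of the patch: on the now 64-bit-only
    hypervisor sizeof(long) == 8, so every operand width the shadow
    emulator accepts - 1, 2, 4 or 8 bytes - fits in a single unsigned
    long, and the separate cmpxchg8b handling becomes redundant. A
    minimal standalone sketch of the resulting check, using hypothetical
    names rather than the Xen source, might look like this:)

    /* Standalone sketch (hypothetical names, not the Xen source): shows why
     * one "bytes > sizeof(long)" check suffices on a 64-bit-only build. */
    #include <stdio.h>
    #include <string.h>

    #define EMUL_OKAY          0   /* stand-in for X86EMUL_OKAY */
    #define EMUL_UNHANDLEABLE  1   /* stand-in for X86EMUL_UNHANDLEABLE */

    static int emulate_cmpxchg(const void *p_old, const void *p_new,
                               unsigned int bytes)
    {
        unsigned long old = 0, new = 0;

        /* sizeof(long) == 8 on x86-64, so 1/2/4/8-byte operands all pass. */
        if ( bytes > sizeof(long) )
            return EMUL_UNHANDLEABLE;

        memcpy(&old, p_old, bytes);
        memcpy(&new, p_new, bytes);

        /* The real code would hand old/new to the paging mode's cmpxchg
         * hook; here we just show that both fit in a single long. */
        printf("cmpxchg %u bytes: old=%#lx new=%#lx\n", bytes, old, new);
        return EMUL_OKAY;
    }

    int main(void)
    {
        unsigned long o = 0x1122334455667788UL, n = ~o;

        /* The 8-byte case now takes the same single-long path as the rest. */
        return emulate_cmpxchg(&o, &n, sizeof(o)) == EMUL_OKAY ? 0 : 1;
    }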
---
 xen/arch/x86/mm/shadow/common.c | 34 ++++++++++++++--------------------
 xen/arch/x86/mm/shadow/types.h  |  1 -
 2 files changed, 14 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index f3f49e0..2270838 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -259,10 +259,10 @@ hvm_emulate_cmpxchg(enum x86_segment seg,
     struct sh_emulate_ctxt *sh_ctxt =
         container_of(ctxt, struct sh_emulate_ctxt, ctxt);
     struct vcpu *v = current;
-    unsigned long addr, old[2], new[2];
+    unsigned long addr, old, new;
     int rc;
 
-    if ( !is_x86_user_segment(seg) )
+    if ( !is_x86_user_segment(seg) || bytes > sizeof(long) )
         return X86EMUL_UNHANDLEABLE;
 
     rc = hvm_translate_linear_addr(
@@ -270,15 +270,12 @@ hvm_emulate_cmpxchg(enum x86_segment seg,
     if ( rc )
         return rc;
 
-    old[0] = new[0] = 0;
-    memcpy(old, p_old, bytes);
-    memcpy(new, p_new, bytes);
-
-    if ( bytes <= sizeof(long) )
-        return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
-            v, addr, old[0], new[0], bytes, sh_ctxt);
+    old = new = 0;
+    memcpy(&old, p_old, bytes);
+    memcpy(&new, p_new, bytes);
 
-    return X86EMUL_UNHANDLEABLE;
+    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
+               v, addr, old, new, bytes, sh_ctxt);
 }
 
 static const struct x86_emulate_ops hvm_shadow_emulator_ops = {
@@ -335,21 +332,18 @@ pv_emulate_cmpxchg(enum x86_segment seg,
 {
     struct sh_emulate_ctxt *sh_ctxt =
         container_of(ctxt, struct sh_emulate_ctxt, ctxt);
-    unsigned long old[2], new[2];
+    unsigned long old, new;
     struct vcpu *v = current;
 
-    if ( !is_x86_user_segment(seg) )
+    if ( !is_x86_user_segment(seg) || bytes > sizeof(long) )
         return X86EMUL_UNHANDLEABLE;
 
-    old[0] = new[0] = 0;
-    memcpy(old, p_old, bytes);
-    memcpy(new, p_new, bytes);
-
-    if ( bytes <= sizeof(long) )
-        return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
-            v, offset, old[0], new[0], bytes, sh_ctxt);
+    old = new = 0;
+    memcpy(&old, p_old, bytes);
+    memcpy(&new, p_new, bytes);
 
-    return X86EMUL_UNHANDLEABLE;
+    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
+               v, offset, old, new, bytes, sh_ctxt);
 }
 
 static const struct x86_emulate_ops pv_shadow_emulator_ops = {
diff --git a/xen/arch/x86/mm/shadow/types.h b/xen/arch/x86/mm/shadow/types.h
index 6b9959d..503243f 100644
--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -247,7 +247,6 @@ static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
 #define sh_detach_old_tables       INTERNAL_NAME(sh_detach_old_tables)
 #define sh_x86_emulate_write       INTERNAL_NAME(sh_x86_emulate_write)
 #define sh_x86_emulate_cmpxchg     INTERNAL_NAME(sh_x86_emulate_cmpxchg)
-#define sh_x86_emulate_cmpxchg8b   INTERNAL_NAME(sh_x86_emulate_cmpxchg8b)
 #define sh_audit_l1_table          INTERNAL_NAME(sh_audit_l1_table)
 #define sh_audit_fl1_table         INTERNAL_NAME(sh_audit_fl1_table)
 #define sh_audit_l2_table          INTERNAL_NAME(sh_audit_l2_table)
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

