
[Xen-changelog] [xen stable-4.6] x86: defer not-present segment checks



commit ce904f64784cc3afdb23b09de9af229b3a76a55c
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Oct 25 17:16:30 2016 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Oct 25 17:16:30 2016 +0200

    x86: defer not-present segment checks
    
    Following on from commits 5602e74c60 ("x86emul: correct loading of
    %ss") and bdb860d01c ("x86/HVM: correct segment register loading during
    task switch"), the placement of the not-present checks needs to be
    refined: contrary to what the various instruction pages in Intel's SDM
    suggest, #NP (and its #SS companion) gets checked for only after all
    type and permission checks. The only checks done even later are the
    long-mode-specific ones for system descriptors (which we don't support
    yet) and 64-bit code segments (i.e. anything looking at more than just
    the attribute byte).
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
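
For readers following along, here is a minimal, compilable C sketch of the
ordering the first patch establishes: type and privilege checks run first,
and only then the Present check, which raises #SS for %ss and #NP for other
segments. The enum, struct and load_data_seg() helper below are purely
illustrative and simplified; they are not Xen code (the real logic lives in
hvm_load_segment_selector() and protmode_load_seg() in the diff below).

    #include <stdio.h>

    enum x86_exc { EXC_NONE, EXC_GP, EXC_NP, EXC_SS };

    /* Just the attribute bits the sketch needs, not a full descriptor. */
    struct seg_attrs {
        unsigned int s:1;    /* 1 = code/data segment, 0 = system segment */
        unsigned int wr:1;   /* data segment writable bit */
        unsigned int dpl:2;  /* descriptor privilege level */
        unsigned int p:1;    /* present bit */
    };

    /* Simplified data/stack segment load, with checks in the patched order. */
    static enum x86_exc load_data_seg(struct seg_attrs d, unsigned int rpl,
                                      unsigned int cpl, int is_ss)
    {
        /* Type checks first: must be code/data, and writable for %ss ... */
        if ( !d.s || (is_ss && !d.wr) )
            return EXC_GP;
        /* ... then privilege checks (simplified DPL >= max(CPL, RPL) rule;
         * %ss actually requires DPL == CPL == RPL). */
        if ( d.dpl < cpl || d.dpl < rpl )
            return EXC_GP;
        /* Only now the Present check: #SS for %ss, #NP for everything else. */
        if ( !d.p )
            return is_ss ? EXC_SS : EXC_NP;
        return EXC_NONE;
    }

    int main(void)
    {
        /* System (S=0) descriptor that is also not present: type check wins. */
        struct seg_attrs bad_type    = { .s = 0, .wr = 1, .dpl = 3, .p = 0 };
        /* Well-formed data descriptor that is merely not present. */
        struct seg_attrs not_present = { .s = 1, .wr = 1, .dpl = 3, .p = 0 };

        printf("mis-typed + not present -> %s\n",
               load_data_seg(bad_type, 3, 3, 0) == EXC_GP ? "#GP" : "not #GP");
        printf("not present only        -> %s\n",
               load_data_seg(not_present, 3, 3, 0) == EXC_NP ? "#NP" : "not #NP");
        return 0;
    }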
    
    x86/hvm: Correct the position of the %cs L/D checks
    
    Contrary to the description in the software manuals, in Long Mode,
    attempts to load %cs check that D is not set in combination with L
    before the Present flag is checked.
    
    This can be observed because the L/D check fails with #GP before the
    presence check fails with #NP.
    
    This change partially reverts c/s 78ff18c90 "x86: defer not-present segment
    checks", taking it back to how it was in the v1 submission.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    master commit: 78ff18c905318a9b1e5dd32662986f03b10a4e56
    master date: 2016-10-10 12:16:49 +0200
    master commit: 13b9f31751a55a96e86bd7e64b433a62a4a5b71e
    master date: 2016-10-14 12:43:17 +0100
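
Similarly, a short illustrative sketch of the ordering the second patch
restores for %cs in long mode: the L/D consistency check (#GP) runs before
the Present check (#NP). The bit positions match the high descriptor word
used as desc.b in the diff below (bit 15 = P, bit 21 = L, bit 22 = D);
load_cs_long_mode() is a made-up helper, not a Xen function.

    #include <stdio.h>

    #define ATTR_P (1u << 15)  /* present bit in the high descriptor word */
    #define ATTR_L (1u << 21)  /* 64-bit code segment */
    #define ATTR_D (1u << 22)  /* default operand size */

    enum x86_exc { EXC_NONE, EXC_GP, EXC_NP };

    /* Simplified long-mode %cs load: only the checks relevant here. */
    static enum x86_exc load_cs_long_mode(unsigned int desc_b)
    {
        /* L and D set together is invalid, checked before presence. */
        if ( (desc_b & ATTR_L) && (desc_b & ATTR_D) )
            return EXC_GP;
        /* A descriptor that passed the L/D check can still be not-present. */
        if ( !(desc_b & ATTR_P) )
            return EXC_NP;
        return EXC_NONE;
    }

    int main(void)
    {
        /* Not-present *and* L+D set: the #GP from the L/D check wins. */
        printf("L+D, not present -> %s\n",
               load_cs_long_mode(ATTR_L | ATTR_D) == EXC_GP ? "#GP" : "#NP");
        /* Not-present, L only: now #NP is reported. */
        printf("L,   not present -> %s\n",
               load_cs_long_mode(ATTR_L) == EXC_NP ? "#NP" : "other");
        return 0;
    }
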
---
 xen/arch/x86/hvm/hvm.c                 | 27 +++++++++++----------
 xen/arch/x86/x86_emulate/x86_emulate.c | 44 +++++++++++++++++++---------------
 2 files changed, 39 insertions(+), 32 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c0f7ded..7d10ab0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3858,13 +3858,6 @@ static int hvm_load_segment_selector(
     do {
         desc = *pdesc;
 
-        /* Segment present in memory? */
-        if ( !(desc.b & _SEGMENT_P) )
-        {
-            fault_type = TRAP_no_segment;
-            goto unmap_and_fail;
-        }
-
         /* LDT descriptor is a system segment. All others are code/data. */
         if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) )
             goto unmap_and_fail;
@@ -3909,6 +3902,14 @@ static int hvm_load_segment_selector(
                 goto unmap_and_fail;
             break;
         }
+
+        /* Segment present in memory? */
+        if ( !(desc.b & _SEGMENT_P) )
+        {
+            fault_type = (seg != x86_seg_ss) ? TRAP_no_segment
+                                             : TRAP_stack_error;
+            goto unmap_and_fail;
+        }
     } while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */
               writable && /* except if we are to discard writes */
               (cmpxchg(&pdesc->b, desc.b, desc.b | 0x100) != desc.b) );
@@ -3995,12 +3996,6 @@ void hvm_task_switch(
     if ( tr.attr.fields.g )
         tr.limit = (tr.limit << 12) | 0xfffu;
 
-    if ( !tr.attr.fields.p )
-    {
-        hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
-        goto out;
-    }
-
     if ( tr.attr.fields.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
     {
         hvm_inject_hw_exception(
@@ -4009,6 +4004,12 @@ void hvm_task_switch(
         goto out;
     }
 
+    if ( !tr.attr.fields.p )
+    {
+        hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
+        goto out;
+    }
+
     if ( tr.limit < (sizeof(tss)-1) )
     {
         hvm_inject_hw_exception(TRAP_invalid_tss, tss_sel & 0xfff8);
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 5f6be53..ad24d6a 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1176,7 +1176,7 @@ protmode_load_seg(
     struct { uint32_t a, b; } desc;
     uint8_t dpl, rpl;
     int cpl = get_cpl(ctxt, ops);
-    uint32_t new_desc_b, a_flag = 0x100;
+    uint32_t a_flag = 0x100;
     int rc, fault_type = EXC_GP;
 
     if ( cpl < 0 )
@@ -1217,13 +1217,6 @@ protmode_load_seg(
                          &desc, sizeof(desc), ctxt)) )
         return rc;
 
-    /* Segment present in memory? */
-    if ( !(desc.b & (1u<<15)) )
-    {
-        fault_type = seg != x86_seg_ss ? EXC_NP : EXC_SS;
-        goto raise_exn;
-    }
-
     if ( !is_x86_user_segment(seg) )
     {
         /* System segments must have S flag == 0. */
@@ -1258,7 +1251,11 @@ protmode_load_seg(
                /* Non-conforming segment: check RPL and DPL against CPL. */
                : rpl > cpl || dpl != cpl )
             goto raise_exn;
-        /* 64-bit code segments (L bit set) must have D bit clear. */
+        /*
+         * 64-bit code segments (L bit set) must have D bit clear.
+         * Experimentally in long mode, the L and D bits are checked before
+         * the Present bit.
+         */
         if ( in_longmode(ctxt, ops) &&
              (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
             goto raise_exn;
@@ -1275,7 +1272,8 @@ protmode_load_seg(
         /* LDT system segment? */
         if ( (desc.b & (15u<<8)) != (2u<<8) )
             goto raise_exn;
-        goto skip_accessed_flag;
+        a_flag = 0;
+        break;
     case x86_seg_tr:
         /* Available TSS system segment? */
         if ( (desc.b & (15u<<8)) != (9u<<8) )
@@ -1293,18 +1291,26 @@ protmode_load_seg(
         break;
     }
 
+    /* Segment present in memory? */
+    if ( !(desc.b & (1 << 15)) )
+    {
+        fault_type = seg != x86_seg_ss ? EXC_NP : EXC_SS;
+        goto raise_exn;
+    }
+
     /* Ensure Accessed flag is set. */
-    new_desc_b = desc.b | a_flag;
-    if ( !(desc.b & a_flag) &&
-         ((rc = ops->cmpxchg(
-             x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
-             &desc.b, &new_desc_b, 4, ctxt)) != 0) )
-        return rc;
+    if ( a_flag && !(desc.b & a_flag) )
+    {
+        uint32_t new_desc_b = desc.b | a_flag;
 
-    /* Force the Accessed flag in our local copy. */
-    desc.b |= a_flag;
+        if ( (rc = ops->cmpxchg(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
+                                &desc.b, &new_desc_b, 4, ctxt)) != 0 )
+            return rc;
+
+        /* Force the Accessed flag in our local copy. */
+        desc.b = new_desc_b;
+    }
 
- skip_accessed_flag:
     sreg->base = (((desc.b <<  0) & 0xff000000u) |
                   ((desc.b << 16) & 0x00ff0000u) |
                   ((desc.a >> 16) & 0x0000ffffu));
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.6
