
[Xen-changelog] [xen staging] x86/hvm: Fix mapping corner case during task switching



commit a9a2a761f75126d908612c64fabe6adde2b6d2b9
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Wed Aug 1 13:48:33 2018 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Mon Sep 3 19:02:36 2018 +0100

    x86/hvm: Fix mapping corner case during task switching
    
    hvm_map_entry() can fail for a number of reasons, including for a misaligned
    LDT/GDT access which crosses a 4K boundary.  Architecturally speaking, this
    should be fixed, but Long Mode doesn't support task switches, and no 32bit
    OS is going to misalign its LDT/GDT base, which is why this task isn't very
    high on the TODO list.
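
    As an illustration of that corner case, here is a minimal standalone
    sketch (hypothetical helper, not part of this patch) of when an 8-byte
    GDT/LDT descriptor access straddles a 4K page boundary:

        #include <stdbool.h>
        #include <stdint.h>

        #define PAGE_SIZE 4096

        /*
         * An 8-byte descriptor at 'addr' crosses a 4K boundary when its
         * first and last bytes fall in different pages.  hvm_map_entry()
         * maps a single guest frame, so such a split access is the
         * misaligned case described above.
         */
        static bool desc_crosses_page(uint64_t addr)
        {
            return (addr / PAGE_SIZE) != ((addr + 7) / PAGE_SIZE);
        }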
    
    However, the hvm_map_fail error label returns failure without raising an
    exception, which interferes with hvm_task_switch()'s exception tracking, and
    can cause it to finish and return to guest context as if the task switch had
    completed successfully.
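
    A toy model of that pre-patch control flow (all names hypothetical,
    heavily condensed from hvm.c):

        #include <stdbool.h>
        #include <stdio.h>

        static bool exception_pending;

        /*
         * Models the old hvm_load_segment_selector() failure paths: the
         * mapping-failure path returned 1 without queuing an exception,
         * unlike every other failure path.
         */
        static int load_seg(bool map_failure)
        {
            if ( map_failure )
                return 1;             /* old hvm_map_fail: no exception */
            exception_pending = true; /* other failures inject #TS first */
            return 1;
        }

        int main(void)
        {
            if ( load_seg(true) && !exception_pending )
                /*
                 * hvm_task_switch()'s exception tracking sees failure but
                 * no pending fault, and can fall through as if the task
                 * switch had succeeded.
                 */
                puts("failure reported, but no exception queued");
            return 0;
        }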
    
    Resolve this corner case by folding all the failure paths together, which
    causes an hvm_map_entry() failure to result in #TS[SEL].  hvm_unmap_entry()
    copes fine with a NULL pointer so can be called unconditionally.
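
    The cleanup idiom the fix relies on, as a generic sketch (not the Xen
    implementation itself): an unmap routine that tolerates NULL makes a
    single folded 'fault' label safe to reach both before and after the
    descriptor was mapped.

        #include <stddef.h>

        static void unmap_entry(void *p)
        {
            if ( p == NULL )
                return;    /* nothing was mapped; safe unconditionally */
            /* ... unmap the guest frame here ... */
        }

    In the patched code below, reaching the folded label unconditionally
    calls hvm_unmap_entry(pdesc) and then injects the fault with error code
    sel & 0xfffc, i.e. the selector with its RPL bits cleared.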
    
    In practice, this is just a latent corner case as all hvm_map_entry()
    failures crash the domain, but it should be fixed nevertheless.
    
    Finally, rename hvm_load_segment_selector() to task_switch_load_seg() to
    avoid giving the impression that it is usable for general segment loading.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c | 51 +++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 26 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ac067a8d38..c22bf0bbb6 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2695,11 +2695,11 @@ static void hvm_unmap_entry(void *p)
     hvm_unmap_guest_frame(p, 0);
 }
 
-static int hvm_load_segment_selector(
+static int task_switch_load_seg(
     enum x86_segment seg, uint16_t sel, unsigned int cpl, unsigned int eflags)
 {
     struct segment_register desctab, segr;
-    struct desc_struct *pdesc, desc;
+    struct desc_struct *pdesc = NULL, desc;
     u8 dpl, rpl;
     bool_t writable;
     int fault_type = TRAP_invalid_tss;
@@ -2719,7 +2719,7 @@ static int hvm_load_segment_selector(
     if ( (sel & 0xfffc) == 0 )
     {
         if ( (seg == x86_seg_cs) || (seg == x86_seg_ss) )
-            goto fail;
+            goto fault;
         memset(&segr, 0, sizeof(segr));
         segr.sel = sel;
         hvm_set_segment_register(v, seg, &segr);
@@ -2728,29 +2728,29 @@ static int hvm_load_segment_selector(
 
     /* LDT descriptor must be in the GDT. */
     if ( (seg == x86_seg_ldtr) && (sel & 4) )
-        goto fail;
+        goto fault;
 
     hvm_get_segment_register(
         v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
 
     /* Segment not valid for use (cooked meaning of .p)? */
     if ( !desctab.p )
-        goto fail;
+        goto fault;
 
     /* Check against descriptor table limit. */
     if ( ((sel & 0xfff8) + 7) > desctab.limit )
-        goto fail;
+        goto fault;
 
     pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8), &writable);
     if ( pdesc == NULL )
-        goto hvm_map_fail;
+        goto fault;
 
     do {
         desc = *pdesc;
 
         /* LDT descriptor is a system segment. All others are code/data. */
         if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) )
-            goto unmap_and_fail;
+            goto fault;
 
         dpl = (desc.b >> 13) & 3;
         rpl = sel & 3;
@@ -2760,27 +2760,27 @@ static int hvm_load_segment_selector(
         case x86_seg_cs:
             /* Code segment? */
             if ( !(desc.b & _SEGMENT_CODE) )
-                goto unmap_and_fail;
+                goto fault;
             /* Non-conforming segment: check DPL against RPL. */
             if ( !(desc.b & _SEGMENT_EC) && (dpl != rpl) )
-                goto unmap_and_fail;
+                goto fault;
             break;
         case x86_seg_ss:
             /* Writable data segment? */
             if ( (desc.b & (_SEGMENT_CODE|_SEGMENT_WR)) != _SEGMENT_WR )
-                goto unmap_and_fail;
+                goto fault;
             if ( (dpl != cpl) || (dpl != rpl) )
-                goto unmap_and_fail;
+                goto fault;
             break;
         case x86_seg_ldtr:
             /* LDT system segment? */
             if ( (desc.b & _SEGMENT_TYPE) != (2u<<8) )
-                goto unmap_and_fail;
+                goto fault;
             goto skip_accessed_flag;
         default:
             /* Readable code or data segment? */
             if ( (desc.b & (_SEGMENT_CODE|_SEGMENT_WR)) == _SEGMENT_CODE )
-                goto unmap_and_fail;
+                goto fault;
             /*
              * Data or non-conforming code segment:
              * check DPL against RPL and CPL.
@@ -2788,7 +2788,7 @@ static int hvm_load_segment_selector(
             if ( ((desc.b & (_SEGMENT_EC|_SEGMENT_CODE)) !=
                   (_SEGMENT_EC|_SEGMENT_CODE))
                  && ((dpl < cpl) || (dpl < rpl)) )
-                goto unmap_and_fail;
+                goto fault;
             break;
         }
 
@@ -2797,7 +2797,7 @@ static int hvm_load_segment_selector(
         {
             fault_type = (seg != x86_seg_ss) ? TRAP_no_segment
                                              : TRAP_stack_error;
-            goto unmap_and_fail;
+            goto fault;
         }
     } while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */
               writable && /* except if we are to discard writes */
@@ -2822,11 +2822,10 @@ static int hvm_load_segment_selector(
 
     return 0;
 
- unmap_and_fail:
+ fault:
     hvm_unmap_entry(pdesc);
- fail:
     hvm_inject_hw_exception(fault_type, sel & 0xfffc);
- hvm_map_fail:
+
     return 1;
 }
 
@@ -2999,7 +2998,7 @@ void hvm_task_switch(
 
     new_cpl = tss.eflags & X86_EFLAGS_VM ? 3 : tss.cs & 3;
 
-    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, new_cpl, 0) )
+    if ( task_switch_load_seg(x86_seg_ldtr, tss.ldt, new_cpl, 0) )
         goto out;
 
     rc = hvm_set_cr3(tss.cr3, 1);
@@ -3020,12 +3019,12 @@ void hvm_task_switch(
     regs->rdi    = tss.edi;
 
     exn_raised = 0;
-    if ( hvm_load_segment_selector(x86_seg_es, tss.es, new_cpl, tss.eflags) ||
-         hvm_load_segment_selector(x86_seg_cs, tss.cs, new_cpl, tss.eflags) ||
-         hvm_load_segment_selector(x86_seg_ss, tss.ss, new_cpl, tss.eflags) ||
-         hvm_load_segment_selector(x86_seg_ds, tss.ds, new_cpl, tss.eflags) ||
-         hvm_load_segment_selector(x86_seg_fs, tss.fs, new_cpl, tss.eflags) ||
-         hvm_load_segment_selector(x86_seg_gs, tss.gs, new_cpl, tss.eflags) )
+    if ( task_switch_load_seg(x86_seg_es, tss.es, new_cpl, tss.eflags) ||
+         task_switch_load_seg(x86_seg_cs, tss.cs, new_cpl, tss.eflags) ||
+         task_switch_load_seg(x86_seg_ss, tss.ss, new_cpl, tss.eflags) ||
+         task_switch_load_seg(x86_seg_ds, tss.ds, new_cpl, tss.eflags) ||
+         task_switch_load_seg(x86_seg_fs, tss.fs, new_cpl, tss.eflags) ||
+         task_switch_load_seg(x86_seg_gs, tss.gs, new_cpl, tss.eflags) )
         exn_raised = 1;
 
     if ( taskswitch_reason == TSW_call_or_int )
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

