
[Xen-changelog] [xen stable-4.2] x86: make new_guest_cr3() preemptible

commit a8f694905923e9ad4067d0fb63dbbcfb6e585bff
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu May 2 17:15:13 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu May 2 17:15:13 2013 +0200

    x86: make new_guest_cr3() preemptible
    
    ... as it may take significant amounts of time.
    
    This is part of CVE-2013-1918 / XSA-45.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
    master commit: e2e6b7b627fec0d7a769ab46441f2985ebccbf04
    master date: 2013-05-02 16:35:50 +0200
---
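A note on the pattern this change introduces (a minimal sketch, not Xen
code: the type layouts, fake_put_preemptible() and its reference "budget"
are illustrative assumptions): a long-running teardown reports -EINTR when
it runs out of budget, the caller folds that into -EAGAIN so callers only
ever see one retry code, and the page being torn down is parked in
old_guest_table so a restarted hypercall can resume where it left off.

    #include <errno.h>
    #include <stdio.h>

    struct page_info { int refs_left; };
    struct vcpu { struct page_info *old_guest_table; };

    /* Drop up to 'budget' references, then ask to be restarted. */
    static int fake_put_preemptible(struct page_info *pg, int budget)
    {
        while ( pg->refs_left > 0 )
        {
            if ( budget-- == 0 )
                return -EINTR;       /* preempted mid-teardown */
            pg->refs_left--;
        }
        return 0;                    /* fully released */
    }

    /* Mirrors the switch at the end of new_guest_cr3() below. */
    static int drop_old_base(struct vcpu *v, struct page_info *pg)
    {
        int rc = fake_put_preemptible(pg, 2);

        switch ( rc )
        {
        case -EINTR:
            rc = -EAGAIN;            /* callers only ever see -EAGAIN */
            /* fall through */
        case -EAGAIN:
            v->old_guest_table = pg; /* resume point for the retried call */
            break;
        default:
            break;                   /* 0: done; the patch BUG_ON()s others */
        }
        return rc;
    }

    int main(void)
    {
        struct page_info pg = { .refs_left = 5 };
        struct vcpu v = { .old_guest_table = NULL };
        int rc;

        /* Retry until teardown completes, as a continued hypercall would. */
        while ( (rc = drop_old_base(&v, &pg)) == -EAGAIN )
            printf("preempted, %d refs left\n", pg.refs_left);

        printf("done, rc=%d\n", rc);
        return 0;
    }

The same shape appears twice in the real function: on the way in for
mod_l4_entry() / get_page_and_type_from_pagenr(), and on the way out for
put_page_and_type_preemptible().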
 xen/arch/x86/mm.c    |   82 +++++++++++++++++++++++++++++++++++++------------
 xen/arch/x86/traps.c |   15 ++++++++-
 2 files changed, 75 insertions(+), 22 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 2f05b60..d455d13 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2889,44 +2889,69 @@ int new_guest_cr3(unsigned long mfn)
 {
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
-    int okay;
+    int rc;
     unsigned long old_base_mfn;
 
 #ifdef __x86_64__
     if ( is_pv_32on64_domain(d) )
     {
-        okay = paging_mode_refcounts(d)
-            ? 0 /* Old code was broken, but what should it be? */
-            : mod_l4_entry(
+        rc = paging_mode_refcounts(d)
+             ? -EINVAL /* Old code was broken, but what should it be? */
+             : mod_l4_entry(
                     __va(pagetable_get_paddr(curr->arch.guest_table)),
                     l4e_from_pfn(
                         mfn,
                         (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)),
-                    pagetable_get_pfn(curr->arch.guest_table), 0, 0, curr) == 0;
-        if ( unlikely(!okay) )
+                    pagetable_get_pfn(curr->arch.guest_table), 0, 1, curr);
+        switch ( rc )
         {
+        case 0:
+            break;
+        case -EINTR:
+        case -EAGAIN:
+            return -EAGAIN;
+        default:
             MEM_LOG("Error while installing new compat baseptr %lx", mfn);
-            return 0;
+            return rc;
         }
 
         invalidate_shadow_ldt(curr, 0);
         write_ptbase(curr);
 
-        return 1;
+        return 0;
     }
 #endif
-    okay = paging_mode_refcounts(d)
-        ? get_page_from_pagenr(mfn, d)
-        : !get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 0);
-    if ( unlikely(!okay) )
+    rc = put_old_guest_table(curr);
+    if ( unlikely(rc) )
+        return rc;
+
+    old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
+    /*
+     * This is particularly important when getting restarted after the
+     * previous attempt got preempted in the put-old-MFN phase.
+     */
+    if ( old_base_mfn == mfn )
     {
-        MEM_LOG("Error while installing new baseptr %lx", mfn);
+        write_ptbase(curr);
         return 0;
     }
 
-    invalidate_shadow_ldt(curr, 0);
+    rc = paging_mode_refcounts(d)
+         ? (get_page_from_pagenr(mfn, d) ? 0 : -EINVAL)
+         : get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d, 0, 1);
+    switch ( rc )
+    {
+    case 0:
+        break;
+    case -EINTR:
+    case -EAGAIN:
+        return -EAGAIN;
+    default:
+        MEM_LOG("Error while installing new baseptr %lx", mfn);
+        return rc;
+    }
 
-    old_base_mfn = pagetable_get_pfn(curr->arch.guest_table);
+    invalidate_shadow_ldt(curr, 0);
 
     curr->arch.guest_table = pagetable_from_pfn(mfn);
     update_cr3(curr);
@@ -2935,13 +2960,25 @@ int new_guest_cr3(unsigned long mfn)
 
     if ( likely(old_base_mfn != 0) )
     {
+        struct page_info *page = mfn_to_page(old_base_mfn);
+
         if ( paging_mode_refcounts(d) )
-            put_page(mfn_to_page(old_base_mfn));
+            put_page(page);
         else
-            put_page_and_type(mfn_to_page(old_base_mfn));
+            switch ( rc = put_page_and_type_preemptible(page, 1) )
+            {
+            case -EINTR:
+                rc = -EAGAIN;
+            case -EAGAIN:
+                curr->arch.old_guest_table = page;
+                break;
+            default:
+                BUG_ON(rc);
+                break;
+            }
     }
 
-    return 1;
+    return rc;
 }
 
 static struct domain *get_pg_owner(domid_t domid)
@@ -3239,8 +3276,13 @@ long do_mmuext_op(
         }
 
         case MMUEXT_NEW_BASEPTR:
-            okay = (!paging_mode_translate(d)
-                    && new_guest_cr3(op.arg1.mfn));
+            if ( paging_mode_translate(d) )
+                okay = 0;
+            else
+            {
+                rc = new_guest_cr3(op.arg1.mfn);
+                okay = !rc;
+            }
             break;
 
         
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 692281a..eada470 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2407,12 +2407,23 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
 #endif
             }
             page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC);
-            rc = page ? new_guest_cr3(page_to_mfn(page)) : 0;
             if ( page )
+            {
+                rc = new_guest_cr3(page_to_mfn(page));
                 put_page(page);
+            }
+            else
+                rc = -EINVAL;
             domain_unlock(v->domain);
-            if ( rc == 0 ) /* not okay */
+            switch ( rc )
+            {
+            case 0:
+                break;
+            case -EAGAIN: /* retry after preemption */
+                goto skip;
+            default:      /* not okay */
                 goto fail;
+            }
             break;
         }
 
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2
