
[Xen-changelog] [xen master] x86: cleanup after making various page table manipulation operations preemptible



commit b965b31a6bce8c37e67e525fae6da0e2f26d6b2e
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu May 2 17:04:14 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu May 2 17:04:14 2013 +0200

    x86: cleanup after making various page table manipulation operations preemptible
    
    This drops the "preemptible" parameters from various functions that can
    no longer (or, as validated by assertions, should no longer) be run in
    non-preemptible mode. This proves that manipulations of at least L3 and
    L4 page tables and page table entries are now always preemptible, i.e.
    that the earlier patches actually fulfill their purpose of fixing the
    resulting security issue.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
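The shape of the cleanup is the same throughout the diff below: a caller that
used to forward a "preemptible" flag now asserts the flag and calls a helper
that no longer takes it. A minimal, self-contained sketch of that pattern
follows; the names, the plain assert(), and the errno values are stand-ins for
illustration only, not the actual Xen APIs.

    #include <assert.h>
    #include <errno.h>

    struct page_info;                       /* opaque placeholder type */

    /* After the cleanup the helper is unconditionally preemptible: it may
     * return -EINTR or -EAGAIN so the enclosing operation can be restarted. */
    static int free_l3_table(struct page_info *page)
    {
        (void)page;
        return 0;                           /* or -EINTR / -EAGAIN */
    }

    /* The dispatcher keeps its 'preemptible' argument, but for L3/L4 tables
     * it now only asserts the invariant instead of forwarding the flag. */
    static int free_page_type(struct page_info *page, unsigned long type,
                              int preemptible)
    {
        switch ( type )
        {
        case 3:                             /* stand-in for PGT_l3_page_table */
            assert(preemptible);            /* proven by the earlier patches */
            return free_l3_table(page);     /* no flag to pass any more */
        default:
            return -EINVAL;
        }
    }

    int main(void)
    {
        struct page_info *pg = 0;           /* never dereferenced here */
        return free_page_type(pg, 3, /* preemptible */ 1) ? 1 : 0;
    }
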
 xen/arch/x86/domain.c    |    2 +-
 xen/arch/x86/mm.c        |   98 ++++++++++++++++++++++++---------------------
 xen/include/asm-x86/mm.h |    9 +---
 3 files changed, 55 insertions(+), 54 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 0baaa95..db1e65d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1859,7 +1859,7 @@ static int relinquish_memory(
         }
 
         if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
-            ret = put_page_and_type_preemptible(page, 1);
+            ret = put_page_and_type_preemptible(page);
         switch ( ret )
         {
         case 0:
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index aef98d5..dd89079 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -945,7 +945,7 @@ get_page_from_l2e(
 define_get_linear_pagetable(l3);
 static int
 get_page_from_l3e(
-    l3_pgentry_t l3e, unsigned long pfn, struct domain *d, int partial, int preemptible)
+    l3_pgentry_t l3e, unsigned long pfn, struct domain *d, int partial)
 {
     int rc;
 
@@ -959,7 +959,7 @@ get_page_from_l3e(
     }
 
     rc = get_page_and_type_from_pagenr(
-        l3e_get_pfn(l3e), PGT_l2_page_table, d, partial, preemptible);
+        l3e_get_pfn(l3e), PGT_l2_page_table, d, partial, 1);
     if ( unlikely(rc == -EINVAL) && get_l3_linear_pagetable(l3e, pfn, d) )
         rc = 0;
 
@@ -969,7 +969,7 @@ get_page_from_l3e(
 define_get_linear_pagetable(l4);
 static int
 get_page_from_l4e(
-    l4_pgentry_t l4e, unsigned long pfn, struct domain *d, int partial, int preemptible)
+    l4_pgentry_t l4e, unsigned long pfn, struct domain *d, int partial)
 {
     int rc;
 
@@ -983,7 +983,7 @@ get_page_from_l4e(
     }
 
     rc = get_page_and_type_from_pagenr(
-        l4e_get_pfn(l4e), PGT_l3_page_table, d, partial, preemptible);
+        l4e_get_pfn(l4e), PGT_l3_page_table, d, partial, 1);
     if ( unlikely(rc == -EINVAL) && get_l4_linear_pagetable(l4e, pfn, d) )
         rc = 0;
 
@@ -1122,8 +1122,10 @@ static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)
 static int __put_page_type(struct page_info *, int preemptible);
 
 static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
-                             int partial, int preemptible)
+                             int partial, bool_t defer)
 {
+    struct page_info *pg;
+
     if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || (l3e_get_pfn(l3e) == pfn) )
         return 1;
 
@@ -1140,40 +1142,44 @@ static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
         return 0;
     }
 
+    pg = l3e_get_page(l3e);
+
     if ( unlikely(partial > 0) )
     {
-        ASSERT(preemptible >= 0);
-        return __put_page_type(l3e_get_page(l3e), preemptible);
+        ASSERT(!defer);
+        return __put_page_type(pg, 1);
     }
 
-    if ( preemptible < 0 )
+    if ( defer )
     {
-        current->arch.old_guest_table = l3e_get_page(l3e);
+        current->arch.old_guest_table = pg;
         return 0;
     }
 
-    return put_page_and_type_preemptible(l3e_get_page(l3e), preemptible);
+    return put_page_and_type_preemptible(pg);
 }
 
 static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn,
-                             int partial, int preemptible)
+                             int partial, bool_t defer)
 {
     if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) && 
          (l4e_get_pfn(l4e) != pfn) )
     {
+        struct page_info *pg = l4e_get_page(l4e);
+
         if ( unlikely(partial > 0) )
         {
-            ASSERT(preemptible >= 0);
-            return __put_page_type(l4e_get_page(l4e), preemptible);
+            ASSERT(!defer);
+            return __put_page_type(pg, 1);
         }
 
-        if ( preemptible < 0 )
+        if ( defer )
         {
-            current->arch.old_guest_table = l4e_get_page(l4e);
+            current->arch.old_guest_table = pg;
             return 0;
         }
 
-        return put_page_and_type_preemptible(l4e_get_page(l4e), preemptible);
+        return put_page_and_type_preemptible(pg);
     }
     return 1;
 }
@@ -1307,7 +1313,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type,
     return rc > 0 ? 0 : rc;
 }
 
-static int alloc_l3_table(struct page_info *page, int preemptible)
+static int alloc_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     unsigned long  pfn = page_to_mfn(page);
@@ -1339,11 +1345,10 @@ static int alloc_l3_table(struct page_info *page, int preemptible)
                 rc = get_page_and_type_from_pagenr(l3e_get_pfn(pl3e[i]),
                                                    PGT_l2_page_table |
                                                    PGT_pae_xen_l2,
-                                                   d, partial, preemptible);
+                                                   d, partial, 1);
         }
         else if ( !is_guest_l3_slot(i) ||
-                  (rc = get_page_from_l3e(pl3e[i], pfn, d,
-                                          partial, preemptible)) > 0 )
+                  (rc = get_page_from_l3e(pl3e[i], pfn, d, partial)) > 0 )
             continue;
 
         if ( rc == -EAGAIN )
@@ -1403,7 +1408,7 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d)
         l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
 }
 
-static int alloc_l4_table(struct page_info *page, int preemptible)
+static int alloc_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     unsigned long  pfn = page_to_mfn(page);
@@ -1415,8 +1420,7 @@ static int alloc_l4_table(struct page_info *page, int preemptible)
           i++, partial = 0 )
     {
         if ( !is_guest_l4_slot(d, i) ||
-             (rc = get_page_from_l4e(pl4e[i], pfn, d,
-                                     partial, preemptible)) > 0 )
+             (rc = get_page_from_l4e(pl4e[i], pfn, d, partial)) > 0 )
             continue;
 
         if ( rc == -EAGAIN )
@@ -1503,7 +1507,7 @@ static int free_l2_table(struct page_info *page, int preemptible)
     return err;
 }
 
-static int free_l3_table(struct page_info *page, int preemptible)
+static int free_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     unsigned long pfn = page_to_mfn(page);
@@ -1516,7 +1520,7 @@ static int free_l3_table(struct page_info *page, int preemptible)
     do {
         if ( is_guest_l3_slot(i) )
         {
-            rc = put_page_from_l3e(pl3e[i], pfn, partial, preemptible);
+            rc = put_page_from_l3e(pl3e[i], pfn, partial, 0);
             if ( rc < 0 )
                 break;
             partial = 0;
@@ -1542,7 +1546,7 @@ static int free_l3_table(struct page_info *page, int preemptible)
     return rc > 0 ? 0 : rc;
 }
 
-static int free_l4_table(struct page_info *page, int preemptible)
+static int free_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     unsigned long pfn = page_to_mfn(page);
@@ -1552,7 +1556,7 @@ static int free_l4_table(struct page_info *page, int preemptible)
 
     do {
         if ( is_guest_l4_slot(d, i) )
-            rc = put_page_from_l4e(pl4e[i], pfn, partial, preemptible);
+            rc = put_page_from_l4e(pl4e[i], pfn, partial, 0);
         if ( rc < 0 )
             break;
         partial = 0;
@@ -1811,7 +1815,6 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
                         l3_pgentry_t nl3e, 
                         unsigned long pfn,
                         int preserve_ad,
-                        int preemptible,
                         struct vcpu *vcpu)
 {
     l3_pgentry_t ol3e;
@@ -1851,7 +1854,7 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
             return rc ? 0 : -EFAULT;
         }
 
-        rc = get_page_from_l3e(nl3e, pfn, d, 0, preemptible);
+        rc = get_page_from_l3e(nl3e, pfn, d, 0);
         if ( unlikely(rc < 0) )
             return rc;
         rc = 0;
@@ -1874,7 +1877,7 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
         if ( !create_pae_xen_mappings(d, pl3e) )
             BUG();
 
-    put_page_from_l3e(ol3e, pfn, 0, -preemptible);
+    put_page_from_l3e(ol3e, pfn, 0, 1);
     return rc;
 }
 
@@ -1883,7 +1886,6 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
                         l4_pgentry_t nl4e, 
                         unsigned long pfn,
                         int preserve_ad,
-                        int preemptible,
                         struct vcpu *vcpu)
 {
     struct domain *d = vcpu->domain;
@@ -1916,7 +1918,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
             return rc ? 0 : -EFAULT;
         }
 
-        rc = get_page_from_l4e(nl4e, pfn, d, 0, preemptible);
+        rc = get_page_from_l4e(nl4e, pfn, d, 0);
         if ( unlikely(rc < 0) )
             return rc;
         rc = 0;
@@ -1935,7 +1937,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
         return -EFAULT;
     }
 
-    put_page_from_l4e(ol4e, pfn, 0, -preemptible);
+    put_page_from_l4e(ol4e, pfn, 0, 1);
     return rc;
 }
 
@@ -2055,10 +2057,12 @@ static int alloc_page_type(struct page_info *page, unsigned long type,
         rc = alloc_l2_table(page, type, preemptible);
         break;
     case PGT_l3_page_table:
-        rc = alloc_l3_table(page, preemptible);
+        ASSERT(preemptible);
+        rc = alloc_l3_table(page);
         break;
     case PGT_l4_page_table:
-        rc = alloc_l4_table(page, preemptible);
+        ASSERT(preemptible);
+        rc = alloc_l4_table(page);
         break;
     case PGT_seg_desc_page:
         rc = alloc_segdesc_page(page);
@@ -2148,10 +2152,12 @@ int free_page_type(struct page_info *page, unsigned long type,
         rc = free_l2_table(page, preemptible);
         break;
     case PGT_l3_page_table:
-        rc = free_l3_table(page, preemptible);
+        ASSERT(preemptible);
+        rc = free_l3_table(page);
         break;
     case PGT_l4_page_table:
-        rc = free_l4_table(page, preemptible);
+        ASSERT(preemptible);
+        rc = free_l4_table(page);
         break;
     default:
         MEM_LOG("type %lx pfn %lx\n", type, page_to_mfn(page));
@@ -2622,7 +2628,7 @@ static int put_old_guest_table(struct vcpu *v)
     if ( !v->arch.old_guest_table )
         return 0;
 
-    switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table, 1) )
+    switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table) )
     {
     case -EINTR:
     case -EAGAIN:
@@ -2656,7 +2662,7 @@ int vcpu_destroy_pagetables(struct vcpu *v)
         if ( paging_mode_refcounts(v->domain) )
             put_page(page);
         else
-            rc = put_page_and_type_preemptible(page, 1);
+            rc = put_page_and_type_preemptible(page);
     }
 
     if ( l4tab )
@@ -2677,7 +2683,7 @@ int vcpu_destroy_pagetables(struct vcpu *v)
             if ( paging_mode_refcounts(v->domain) )
                 put_page(page);
             else
-                rc = put_page_and_type_preemptible(page, 1);
+                rc = put_page_and_type_preemptible(page);
         }
         if ( !rc )
             v->arch.guest_table_user = pagetable_null();
@@ -2707,7 +2713,7 @@ int new_guest_cr3(unsigned long mfn)
                     l4e_from_pfn(
                         mfn,
                         (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)),
-                    gt_mfn, 0, 1, curr);
+                    gt_mfn, 0, curr);
         unmap_domain_page(pl4e);
         switch ( rc )
         {
@@ -2771,7 +2777,7 @@ int new_guest_cr3(unsigned long mfn)
         if ( paging_mode_refcounts(d) )
             put_page(page);
         else
-            switch ( rc = put_page_and_type_preemptible(page, 1) )
+            switch ( rc = put_page_and_type_preemptible(page) )
             {
             case -EINTR:
                 rc = -EAGAIN;
@@ -3055,7 +3061,7 @@ long do_mmuext_op(
                 break;
             }
 
-            switch ( rc = put_page_and_type_preemptible(page, 1) )
+            switch ( rc = put_page_and_type_preemptible(page) )
             {
             case -EINTR:
             case -EAGAIN:
@@ -3131,7 +3137,7 @@ long do_mmuext_op(
                 if ( paging_mode_refcounts(d) )
                     put_page(page);
                 else
-                    switch ( rc = put_page_and_type_preemptible(page, 1) )
+                    switch ( rc = put_page_and_type_preemptible(page) )
                     {
                     case -EINTR:
                         rc = -EAGAIN;
@@ -3606,11 +3612,11 @@ long do_mmu_update(
                     break;
                 case PGT_l3_page_table:
                     rc = mod_l3_entry(va, l3e_from_intpte(req.val), mfn,
-                                      cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
+                                      cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
                     break;
                 case PGT_l4_page_table:
                     rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
-                                      cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
+                                      cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
                 break;
                 case PGT_writable_page:
                     perfc_incr(writable_mmu_updates);
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 7a92737..6b367c0 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -364,15 +364,10 @@ static inline void put_page_and_type(struct page_info *page)
     put_page(page);
 }
 
-static inline int put_page_and_type_preemptible(struct page_info *page,
-                                                int preemptible)
+static inline int put_page_and_type_preemptible(struct page_info *page)
 {
-    int rc = 0;
+    int rc = put_page_type_preemptible(page);
 
-    if ( preemptible )
-        rc = put_page_type_preemptible(page);
-    else
-        put_page_type(page);
     if ( likely(rc == 0) )
         put_page(page);
     return rc;
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog