
[Xen-changelog] [xen master] use clear_domain_page() instead of open coding it



commit 3980ccfb975974639254042b2b17d4d6bc02153e
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Oct 27 11:44:20 2015 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Oct 27 11:44:20 2015 +0100

    use clear_domain_page() instead of open coding it
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 xen/arch/x86/hvm/stdvga.c       |    7 ++-----
 xen/arch/x86/hvm/vmx/vvmx.c     |    8 +++-----
 xen/arch/x86/mm/p2m.c           |    9 ++++-----
 xen/arch/x86/mm/paging.c        |    8 +++-----
 xen/arch/x86/mm/shadow/common.c |    8 ++------
 xen/common/page_alloc.c         |   10 ++--------
 6 files changed, 16 insertions(+), 34 deletions(-)
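
[Note: the following sketch is editorial context, not part of the patch.]
Each hunk below replaces the same open-coded map/clear/unmap sequence with the
clear_domain_page() helper. A minimal sketch of what such a helper plausibly
looks like, reconstructed from the pattern being removed (see
xen/include/xen/domain_page.h for the real declaration):

    static inline void clear_domain_page(mfn_t mfn)
    {
        /* Map the machine frame, zero it, then drop the mapping again. */
        void *ptr = map_domain_page(mfn);

        clear_page(ptr);
        unmap_domain_page(ptr);
    }

Callers that hold a struct page_info * pass _mfn(page_to_mfn(pg)), as the
stdvga, vvmx, and page_alloc hunks below do; callers that already have an
mfn_t (paging, p2m) pass it directly.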

diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index f50bff7..02a97f9 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -552,8 +552,7 @@ void stdvga_init(struct domain *d)
 {
     struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
     struct page_info *pg;
-    void *p;
-    int i;
+    unsigned int i;
 
     memset(s, 0, sizeof(*s));
     spin_lock_init(&s->lock);
@@ -564,9 +563,7 @@ void stdvga_init(struct domain *d)
         if ( pg == NULL )
             break;
         s->vram_page[i] = pg;
-        p = __map_domain_page(pg);
-        clear_page(p);
-        unmap_domain_page(p);
+        clear_domain_page(_mfn(page_to_mfn(pg)));
     }
 
     if ( i == ARRAY_SIZE(s->vram_page) )
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index cb6f9b8..3ac9cf9 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -68,7 +68,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     if ( cpu_has_vmx_vmcs_shadowing )
     {
         struct page_info *vmread_bitmap, *vmwrite_bitmap;
-        unsigned long *vr, *vw;
+        unsigned long *vw;
 
         vmread_bitmap = alloc_domheap_page(NULL, 0);
         if ( !vmread_bitmap )
@@ -78,6 +78,8 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         }
         v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
 
+        clear_domain_page(_mfn(page_to_mfn(vmread_bitmap)));
+
         vmwrite_bitmap = alloc_domheap_page(NULL, 0);
         if ( !vmwrite_bitmap )
         {
@@ -86,10 +88,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         }
         v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
 
-        vr = __map_domain_page(vmread_bitmap);
         vw = __map_domain_page(vmwrite_bitmap);
-
-        clear_page(vr);
         clear_page(vw);
 
         /*
@@ -101,7 +100,6 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         set_bit(IO_BITMAP_B, vw);
         set_bit(VMCS_HIGH(IO_BITMAP_B), vw);
 
-        unmap_domain_page(vr);
         unmap_domain_page(vw);
     }
 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 1178832..7f68f24 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1907,7 +1907,7 @@ p2m_flush_table(struct p2m_domain *p2m)
 {
     struct page_info *top, *pg;
     struct domain *d = p2m->domain;
-    void *p;
+    mfn_t mfn;
 
     p2m_lock(p2m);
 
@@ -1928,15 +1928,14 @@ p2m_flush_table(struct p2m_domain *p2m)
     p2m->np2m_base = P2M_BASE_EADDR;
     
     /* Zap the top level of the trie */
-    top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
-    p = __map_domain_page(top);
-    clear_page(p);
-    unmap_domain_page(p);
+    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
+    clear_domain_page(mfn);
 
     /* Make sure nobody else is using this p2m table */
     nestedhvm_vmcx_flushtlb(p2m);
 
     /* Free the rest of the trie pages back to the paging pool */
+    top = mfn_to_page(mfn);
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         if ( pg != top ) 
             d->arch.paging.free_page(d, pg);
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 42648df..e6f726d 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -79,12 +79,10 @@ static mfn_t paging_new_log_dirty_page(struct domain *d)
 static mfn_t paging_new_log_dirty_leaf(struct domain *d)
 {
     mfn_t mfn = paging_new_log_dirty_page(d);
+
     if ( mfn_valid(mfn) )
-    {
-        void *leaf = map_domain_page(mfn);
-        clear_page(leaf);
-        unmap_domain_page(leaf);
-    }
+        clear_domain_page(mfn);
+
     return mfn;
 }
 
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index fa70ad6..bad355b 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1407,8 +1407,7 @@ mfn_t shadow_alloc(struct domain *d,
     unsigned int pages = shadow_size(shadow_type);
     struct page_list_head tmp_list;
     cpumask_t mask;
-    void *p;
-    int i;
+    unsigned int i;
 
     ASSERT(paging_locked_by_me(d));
     ASSERT(shadow_type != SH_type_none);
@@ -1454,10 +1453,7 @@ mfn_t shadow_alloc(struct domain *d,
             flush_tlb_mask(&mask);
         }
         /* Now safe to clear the page for reuse */
-        p = __map_domain_page(sp);
-        ASSERT(p != NULL);
-        clear_page(p);
-        unmap_domain_page(p);
+        clear_domain_page(page_to_mfn(sp));
         INIT_PAGE_LIST_ENTRY(&sp->list);
         page_list_add(sp, &tmp_list);
         sp->u.sh.type = shadow_type;
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index abd5448..624a266 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1959,22 +1959,16 @@ __initcall(pagealloc_keyhandler_init);
 
 void scrub_one_page(struct page_info *pg)
 {
-    void *p;
-
     if ( unlikely(pg->count_info & PGC_broken) )
         return;
 
-    p = __map_domain_page(pg);
-
 #ifndef NDEBUG
     /* Avoid callers relying on allocations returning zeroed pages. */
-    memset(p, 0xc2, PAGE_SIZE);
+    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
 #else
     /* For a production build, clear_page() is the fastest way to scrub. */
-    clear_page(p);
+    clear_domain_page(_mfn(page_to_mfn(pg)));
 #endif
-
-    unmap_domain_page(p);
 }
 
 static void dump_heap(unsigned char key)
--
generated by git-patchbot for /home/xen/git/xen.git#master
