
[Xen-devel] [PATCH] x86/HVM: drop __hvm_clear()



Rather than have __hvm_clear() almost entirely duplicate __hvm_copy(),
drop it and instead have __hvm_copy() clear guest memory when the
source pointer is NULL.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3212,7 +3212,10 @@ static enum hvm_copy_result __hvm_copy(
             }
             else
             {
-                memcpy(p, buf, count);
+                if ( buf )
+                    memcpy(p, buf, count);
+                else
+                    memset(p, 0, count);
                 paging_mark_dirty(curr->domain, page_to_mfn(page));
             }
         }
@@ -3224,95 +3227,8 @@ static enum hvm_copy_result __hvm_copy(
         unmap_domain_page(p);
 
         addr += count;
-        buf  += count;
-        todo -= count;
-        put_page(page);
-    }
-
-    return HVMCOPY_okay;
-}
-
-static enum hvm_copy_result __hvm_clear(paddr_t addr, int size)
-{
-    struct vcpu *curr = current;
-    unsigned long gfn;
-    struct page_info *page;
-    p2m_type_t p2mt;
-    char *p;
-    int count, todo = size;
-    uint32_t pfec = PFEC_page_present | PFEC_write_access;
-
-    /*
-     * XXX Disable for 4.1.0: PV-on-HVM drivers will do grant-table ops
-     * such as query_size. Grant-table code currently does copy_to/from_guest
-     * accesses under the big per-domain lock, which this test would disallow.
-     * The test is not needed until we implement sleeping-on-waitqueue when
-     * we access a paged-out frame, and that's post 4.1.0 now.
-     */
-#if 0
-    /*
-     * If the required guest memory is paged out, this function may sleep.
-     * Hence we bail immediately if called from atomic context.
-     */
-    if ( in_atomic() )
-        return HVMCOPY_unhandleable;
-#endif
-
-    while ( todo > 0 )
-    {
-        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
-
-        gfn = paging_gva_to_gfn(curr, addr, &pfec);
-        if ( gfn == gfn_x(INVALID_GFN) )
-        {
-            if ( pfec & PFEC_page_paged )
-                return HVMCOPY_gfn_paged_out;
-            if ( pfec & PFEC_page_shared )
-                return HVMCOPY_gfn_shared;
-            return HVMCOPY_bad_gva_to_gfn;
-        }
-
-        page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
-
-        if ( !page )
-            return HVMCOPY_bad_gfn_to_mfn;
-
-        if ( p2m_is_paging(p2mt) )
-        {
-            put_page(page);
-            p2m_mem_paging_populate(curr->domain, gfn);
-            return HVMCOPY_gfn_paged_out;
-        }
-        if ( p2m_is_shared(p2mt) )
-        {
-            put_page(page);
-            return HVMCOPY_gfn_shared;
-        }
-        if ( p2m_is_grant(p2mt) )
-        {
-            put_page(page);
-            return HVMCOPY_unhandleable;
-        }
-
-        p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
-
-        if ( p2m_is_discard_write(p2mt) )
-        {
-            static unsigned long lastpage;
-            if ( xchg(&lastpage, gfn) != gfn )
-                gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
-                        " memory page. gfn=%#lx, mfn=%#lx\n",
-                         gfn, page_to_mfn(page));
-        }
-        else
-        {
-            memset(p, 0x00, count);
-            paging_mark_dirty(curr->domain, page_to_mfn(page));
-        }
-
-        unmap_domain_page(p);
-
-        addr += count;
+        if ( buf )
+            buf  += count;
         todo -= count;
         put_page(page);
     }
@@ -3387,7 +3303,7 @@ unsigned long clear_user_hvm(void *to, u
         return 0;
     }
 
-    rc = __hvm_clear((unsigned long)to, len);
+    rc = hvm_copy_to_guest_linear((unsigned long)to, NULL, len, 0, NULL);
     return rc ? len : 0; /* fake a copy_to_user() return code */
 }
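
The consolidated logic boils down to a simple pattern: a single copy
routine that treats a NULL source as "fill the destination with
zeroes", so the dedicated clear routine collapses into a thin wrapper.
For illustration, a minimal, Xen-independent sketch of that pattern
(the names copy_or_clear() and clear_bytes() are hypothetical, not Xen
APIs):

#include <stddef.h>
#include <string.h>

/* Copy len bytes from src to dst, or zero dst when src is NULL. */
static void copy_or_clear(void *dst, const void *src, size_t len)
{
    if ( src )
        memcpy(dst, src, len);
    else
        memset(dst, 0, len);
}

/* The former dedicated clear helper reduces to a one-line wrapper. */
static void clear_bytes(void *dst, size_t len)
{
    copy_or_clear(dst, NULL, len);
}

This mirrors what clear_user_hvm() does after the patch: it passes a
NULL buffer to hvm_copy_to_guest_linear(), and the "rc ? len : 0"
translation fakes the copy_to_user() return convention (number of
bytes not copied on failure, zero on success).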
 


