
[Xen-changelog] [xen staging] xen/arm: p2m: Rework p2m_cache_flush_range



commit 8d6e1fe4a71f4048ba770b8312f25ba3adb67d12
Author:     Julien Grall <julien.grall@xxxxxxx>
AuthorDate: Thu Nov 29 19:02:09 2018 +0000
Commit:     Julien Grall <julien.grall@xxxxxxx>
CommitDate: Wed Dec 12 16:09:22 2018 +0000

    xen/arm: p2m: Rework p2m_cache_flush_range
    
    A follow-up patch will add support for preemption in p2m_cache_flush_range.
    Because of the complexity of the two loops, preemption would need to be
    added to both of them.
    
    This can be avoided by merging the two loops into one while keeping the
    code fairly simple to read and extend.
    
    Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
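
A note on why a single loop makes the upcoming preemption simple: once the
two loops are merged, the gfn cursor 'start' captures all of the walk's
progress, so a preemption check only has to stop the loop and let the caller
resume from that gfn. Below is a minimal sketch of such a check; the batch
size, the 'count' counter, and the exact restart convention are assumptions
for illustration, not the follow-up patch itself (hypercall_preempt_check()
and -ERESTART are the usual Xen primitives):

    unsigned int count = 0;

    while ( gfn_x(start) < gfn_x(end) )
    {
        /*
         * Hypothetical preemption point: check every 512 pages (the
         * interval is an assumption). Everything needed to resume lives
         * in 'start'; 'mfn' and 'next_block_gfn' are simply recomputed
         * after a restart, since next_block_gfn is initialized to
         * 'start' before the loop.
         */
        if ( !(++count % 512) && hypercall_preempt_check() )
            return -ERESTART; /* caller re-invokes with the saved gfn */

        /* ... per-page flush body as in the hunk below ... */
    }

With the original nested loops, the same check would have had to appear in
both loops and carry the inner-loop state out, which is exactly what the
merge avoids.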
 xen/arch/arm/p2m.c | 47 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 15 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index c713226561..17e2523fc1 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1527,7 +1527,8 @@ int relinquish_p2m_mapping(struct domain *d)
 int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    gfn_t next_gfn;
+    gfn_t next_block_gfn;
+    mfn_t mfn = INVALID_MFN;
     p2m_type_t t;
     unsigned int order;
 
@@ -1542,24 +1543,40 @@ int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
     start = gfn_max(start, p2m->lowest_mapped_gfn);
     end = gfn_min(end, p2m->max_mapped_gfn);
 
-    for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
-    {
-        mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
-
-        next_gfn = gfn_next_boundary(start, order);
+    next_block_gfn = start;
 
-        /* Skip hole and non-RAM page */
-        if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
-            continue;
-
-        /* XXX: Implement preemption */
-        while ( gfn_x(start) < gfn_x(next_gfn) )
+    while ( gfn_x(start) < gfn_x(end) )
+    {
+        /*
+         * We want to flush page by page as:
+         *  - it may not be possible to map the full block (can be up to 1GB)
+         *    in Xen memory
+         *  - we may want to do fine-grained preemption, as flushing
+         *    multiple pages in one go may take a long time
+         *
+         * As p2m_get_entry is able to return the size of the mapping
+         * in the p2m, it is pointless to execute it for each page.
+         *
+         * We can optimize this by tracking the gfn of the next
+         * block, so we only need to call p2m_get_entry once per
+         * block (which can be up to 1GB).
+         */
+        if ( gfn_eq(start, next_block_gfn) )
         {
-            flush_page_to_ram(mfn_x(mfn), false);
+            mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
+            next_block_gfn = gfn_next_boundary(start, order);
 
-            start = gfn_add(start, 1);
-            mfn = mfn_add(mfn, 1);
+            if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
+            {
+                start = next_block_gfn;
+                continue;
+            }
         }
+
+        flush_page_to_ram(mfn_x(mfn), false);
+
+        start = gfn_add(start, 1);
+        mfn = mfn_add(mfn, 1);
     }
 
     invalidate_icache();
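
The block-tracking optimization in the new loop is worth a worked example.
p2m_get_entry returns the order of the mapping containing 'start', and
gfn_next_boundary rounds 'start' up to the next 2^order gfn boundary, so a
single p2m lookup serves every page of the block. A small arithmetic sketch
(the power-of-two rounding shown is the assumed behaviour of
gfn_next_boundary):

    /*
     * Example: 'start' = 0x80123 lands inside a 2MB superpage
     * (order 9, i.e. 512 4KB pages):
     *
     *   next_block_gfn = (0x80123 | ((1UL << 9) - 1)) + 1 = 0x80200
     *
     * p2m_get_entry() runs once at 0x80123; the loop then performs
     * 0xdd (221) flush_page_to_ram() iterations, bumping 'start' and
     * 'mfn' in lockstep, until 'start' reaches 0x80200 and the next
     * block lookup happens.
     */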
--
generated by git-patchbot for /home/xen/git/xen.git#staging
