[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen staging] xen/arm: p2m: Add support for preemption in p2m_cache_flush_range



commit 221c2fc433904937966e40c6a2538650253b7204
Author:     Julien Grall <julien.grall@xxxxxxx>
AuthorDate: Mon Nov 26 16:12:01 2018 +0000
Commit:     Julien Grall <julien.grall@xxxxxxx>
CommitDate: Tue Dec 18 17:56:29 2018 +0000

    xen/arm: p2m: Add support for preemption in p2m_cache_flush_range
    
    p2m_cache_flush_range does not yet support preemption, this may be an
    issue as cleaning the cache can take a long time. While the current
    caller (XEN_DOMCTL_cacheflush) does not strictly require preemption, this
    will be necessary for a new caller in a follow-up patch.
    
    The preemption implemented is quite simple, a counter is incremented by:
        - 1 on region skipped
        - 10 for each page requiring a flush
    
    When the counter reaches 512 or above, we will check if preemption is
    needed. If not, the counter will be reset to 0. If yes, the function
    will stop, update start (to allow resuming later on) and return
    -ERESTART. This allows the caller to decide how the preemption will be
    done.
    
    For now, XEN_DOMCTL_cacheflush will continue to ignore the preemption.
    
    Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
 xen/arch/arm/domctl.c     |  8 +++++++-
 xen/arch/arm/p2m.c        | 35 ++++++++++++++++++++++++++++++++---
 xen/include/asm-arm/p2m.h |  4 +++-
 3 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 20691528a6..9da88b8c64 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -54,6 +54,7 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
     {
         gfn_t s = _gfn(domctl->u.cacheflush.start_pfn);
         gfn_t e = gfn_add(s, domctl->u.cacheflush.nr_pfns);
+        int rc;
 
         if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
             return -EINVAL;
@@ -61,7 +62,12 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
         if ( gfn_x(e) < gfn_x(s) )
             return -EINVAL;
 
-        return p2m_cache_flush_range(d, s, e);
+        /* XXX: Handle preemption */
+        do
+            rc = p2m_cache_flush_range(d, &s, e);
+        while ( rc == -ERESTART );
+
+        return rc;
     }
     case XEN_DOMCTL_bind_pt_irq:
     {
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 17e2523fc1..ff52cb178f 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1524,13 +1524,17 @@ int relinquish_p2m_mapping(struct domain *d)
     return rc;
 }
 
-int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
+int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     gfn_t next_block_gfn;
+    gfn_t start = *pstart;
     mfn_t mfn = INVALID_MFN;
     p2m_type_t t;
     unsigned int order;
+    int rc = 0;
+    /* Counter for preemption */
+    unsigned short count = 0;
 
     /*
      * The operation cache flush will invalidate the RAM assigned to the
@@ -1547,6 +1551,25 @@ int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
 
     while ( gfn_x(start) < gfn_x(end) )
     {
+       /*
+         * Cleaning the cache for the P2M may take a long time. So we
+         * need to be able to preempt. We will arbitrarily preempt every
+         * time count reach 512 or above.
+         *
+         * The count will be incremented by:
+         *  - 1 on region skipped
+         *  - 10 for each page requiring a flush
+         */
+        if ( count >= 512 )
+        {
+            if ( softirq_pending(smp_processor_id()) )
+            {
+                rc = -ERESTART;
+                break;
+            }
+            count = 0;
+        }
+
         /*
          * We want to flush page by page as:
          *  - it may not be possible to map the full block (can be up to 1GB)
@@ -1568,22 +1591,28 @@ int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end)
 
             if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
             {
+                count++;
                 start = next_block_gfn;
                 continue;
             }
         }
 
+        count += 10;
+
         flush_page_to_ram(mfn_x(mfn), false);
 
         start = gfn_add(start, 1);
         mfn = mfn_add(mfn, 1);
     }
 
-    invalidate_icache();
+    if ( rc != -ERESTART )
+        invalidate_icache();
 
     p2m_read_unlock(p2m);
 
-    return 0;
+    *pstart = start;
+
+    return rc;
 }
 
 mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 7c1d930b1d..a633e27cc9 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -232,8 +232,10 @@ bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);
 /*
  * Clean & invalidate caches corresponding to a region [start,end) of guest
  * address space.
+ *
+ * start will get updated if the function is preempted.
  */
-int p2m_cache_flush_range(struct domain *d, gfn_t start, gfn_t end);
+int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end);
 
 /*
  * Map a region in the guest p2m with a specific p2m type.
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.