
[Xen-changelog] [xen master] xen/arm: p2m: Re-implement p2m_cache_flush using p2m_get_entry



commit 15d77a68818ccda02a24209a092a63cc1c1ec9dc
Author:     Julien Grall <julien.grall@xxxxxxx>
AuthorDate: Thu Sep 15 12:28:30 2016 +0100
Commit:     Stefano Stabellini <sstabellini@xxxxxxxxxx>
CommitDate: Tue Sep 27 18:14:04 2016 -0700

    xen/arm: p2m: Re-implement p2m_cache_flush using p2m_get_entry
    
    The function p2m_cache_flush can be re-implemented using the generic
    function p2m_get_entry, iterating over the range and using the
    mapping order returned by the callee.
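    
    For illustration only (this sketch is not part of the patch), the
    boundary arithmetic used by the new loop can be modelled standalone
    with plain unsigned longs in place of the gfn_t wrapper: the GFN is
    aligned down to the start of the current mapping before stepping to
    the next one, mirroring gfn_next_boundary() in the hunk below.
    
        /* Standalone model of gfn_next_boundary(), assuming a 4KB
         * granularity and an order expressed in pages (order 9 = 2MB). */
        #include <stdio.h>
    
        static unsigned long next_boundary(unsigned long gfn,
                                           unsigned int order)
        {
            gfn &= ~((1UL << order) - 1);   /* align to current mapping */
            return gfn + (1UL << order);    /* start of the next mapping */
        }
    
        int main(void)
        {
            /* GFN 0x403 inside a 2MB (order 9) mapping is aligned down
             * to 0x400, so the next boundary is 0x600. */
            printf("%#lx\n", next_boundary(0x403UL, 9));
            return 0;
        }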
    
    As with the current implementation, no preemption is implemented,
    although a comment in the current code claims it is. As the function
    is called by a DOMCTL with a region of 1GB maximum, I think
    preemption can be left unimplemented for now.
    
    Finally, drop the CACHEFLUSH operation in apply_one_level as nobody
    is using it anymore. Note that the function could have been dropped
    in one go at the end, but I find it easier to drop the operations
    one by one, avoiding a big deletion in the patch that converts the
    last operation.
    
    Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
    Tested-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
---
 xen/arch/arm/p2m.c | 83 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 50 insertions(+), 33 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 1ff4d77..d544193 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -62,6 +62,22 @@ static inline void p2m_write_lock(struct p2m_domain *p2m)
     write_lock(&p2m->lock);
 }
 
+/*
+ * Return the start of the next mapping based on the order of the
+ * current one.
+ */
+static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
+{
+    /*
+     * The order corresponds to the order of the mapping (or invalid
+     * range) in the page table. So we need to align the GFN before
+     * incrementing.
+     */
+    gfn = _gfn(gfn_x(gfn) & ~((1UL << order) - 1));
+
+    return gfn_add(gfn, 1UL << order);
+}
+
 static void p2m_flush_tlb(struct p2m_domain *p2m);
 
 static inline void p2m_write_unlock(struct p2m_domain *p2m)
@@ -734,7 +750,6 @@ enum p2m_operation {
     INSERT,
     REMOVE,
     RELINQUISH,
-    CACHEFLUSH,
     MEMACCESS,
 };
 
@@ -993,36 +1008,6 @@ static int apply_one_level(struct domain *d,
          */
         return P2M_ONE_PROGRESS;
 
-    case CACHEFLUSH:
-        if ( !p2m_valid(orig_pte) )
-        {
-            *addr = (*addr + level_size) & level_mask;
-            return P2M_ONE_PROGRESS_NOP;
-        }
-
-        if ( level < 3 && p2m_table(orig_pte) )
-            return P2M_ONE_DESCEND;
-
-        /*
-         * could flush up to the next superpage boundary, but would
-         * need to be careful about preemption, so just do one 4K page
-         * now and return P2M_ONE_PROGRESS{,_NOP} so that the caller will
-         * continue to loop over the rest of the range.
-         */
-        if ( p2m_is_ram(orig_pte.p2m.type) )
-        {
-            unsigned long offset = paddr_to_pfn(*addr & ~level_mask);
-            flush_page_to_ram(orig_pte.p2m.base + offset);
-
-            *addr += PAGE_SIZE;
-            return P2M_ONE_PROGRESS;
-        }
-        else
-        {
-            *addr += PAGE_SIZE;
-            return P2M_ONE_PROGRESS_NOP;
-        }
-
     case MEMACCESS:
         if ( level < 3 )
         {
@@ -1571,12 +1556,44 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
     gfn_t end = gfn_add(start, nr);
+    gfn_t next_gfn;
+    p2m_type_t t;
+    unsigned int order;
 
     start = gfn_max(start, p2m->lowest_mapped_gfn);
     end = gfn_min(end, p2m->max_mapped_gfn);
 
-    return apply_p2m_changes(d, CACHEFLUSH, start, nr, INVALID_MFN,
-                             0, p2m_invalid, d->arch.p2m.default_access);
+    /*
+     * The operation cache flush will invalidate the RAM assigned to the
+     * guest in a given range. It will not modify the page table and
+     * flushing the cache whilst the page is used by another CPU is
+     * fine. So using read-lock is fine here.
+     */
+    p2m_read_lock(p2m);
+
+    for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
+    {
+        mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order);
+
+        next_gfn = gfn_next_boundary(start, order);
+
+        /* Skip hole and non-RAM page */
+        if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_ram(t) )
+            continue;
+
+        /* XXX: Implement preemption */
+        while ( gfn_x(start) < gfn_x(next_gfn) )
+        {
+            flush_page_to_ram(mfn_x(mfn));
+
+            start = gfn_add(start, 1);
+            mfn = mfn_add(mfn, 1);
+        }
+    }
+
+    p2m_read_unlock(p2m);
+
+    return 0;
 }
 
 mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
--
generated by git-patchbot for /home/xen/git/xen.git#master
