[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] xen/arm: p2m: Remove translation table when it's empty



commit de5162b664ea3dea07a0b77b523413c2ce479f2a
Author:     Julien Grall <julien.grall@xxxxxxxxxx>
AuthorDate: Tue Dec 1 17:52:12 2015 +0000
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Tue Dec 15 11:59:04 2015 +0000

    xen/arm: p2m: Remove translation table when it's empty
    
    Currently, the translation table is left in place even if no entries
    are in use. Because of how the p2m code has been implemented,
    replacing a translation table by a block (i.e. a superpage) is not
    supported. Therefore, any remapping of a superpage size will be split
    into smaller chunks, making the translation less efficient.
    
    Replacing a table by a block when a new mapping is added would be too
    complicated because it requires us to check if all the upper levels
    are not in use and free them if necessary.
    
    Instead, we will remove the empty translation table when mappings are
    removed. To avoid going through the whole table checking whether any
    entry is in use, a counter representing the number of entries currently
    in use is kept per translation table and updated when an entry changes
    state (i.e. valid <-> invalid).
    
    As Xen allocates a page for each translation table, it's possible to
    store the counter in the struct page_info. A new field p2m_refcount
    has been introduced in the inuse union for this purpose. This is fine
    as the page is only used by the P2M code and nobody touches the other
    field of the union, type_info.
    
    For the record, type_info has not been used because it would require
    more work to use it properly as Xen on ARM doesn't yet have the
    concept of type.
    
    Once Xen has finished removing a mapping and all the references to
    each translation table have been updated, then the higher levels will
    be processed and freed as needed. This will allow us to propagate the
    number of references and free multiple translation tables at different
    levels in one go.
    
    Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
    Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    [ ijc -- updated commit message as discussed ]
---
 xen/arch/arm/p2m.c       |   65 ++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/mm.h |    6 ++++
 2 files changed, 71 insertions(+), 0 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index ae0acf0..2190908 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -427,6 +427,8 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
 
              write_pte(&p[i], pte);
          }
+
+         page->u.inuse.p2m_refcount = LPAE_ENTRIES;
     }
     else
         clear_page(p);
@@ -936,6 +938,20 @@ static int apply_one_level(struct domain *d,
     BUG(); /* Should never get here */
 }
 
+/*
+ * The page is only used by the P2M code which is protected by the p2m->lock.
+ * So we can avoid to use atomic helpers.
+ */
+static void update_reference_mapping(struct page_info *page,
+                                     lpae_t old_entry,
+                                     lpae_t new_entry)
+{
+    if ( p2m_valid(old_entry) && !p2m_valid(new_entry) )
+        page->u.inuse.p2m_refcount--;
+    else if ( !p2m_valid(old_entry) && p2m_valid(new_entry) )
+        page->u.inuse.p2m_refcount++;
+}
+
 static int apply_p2m_changes(struct domain *d,
                      enum p2m_operation op,
                      paddr_t start_gpaddr,
@@ -961,6 +977,8 @@ static int apply_p2m_changes(struct domain *d,
     const bool_t preempt = !is_idle_vcpu(current);
     bool_t flush = false;
     bool_t flush_pt;
+    PAGE_LIST_HEAD(free_pages);
+    struct page_info *pg;
 
     /* Some IOMMU don't support coherent PT walk. When the p2m is
      * shared with the CPU, Xen has to make sure that the PT changes have
@@ -1070,6 +1088,7 @@ static int apply_p2m_changes(struct domain *d,
         {
             unsigned offset = offsets[level];
             lpae_t *entry = &mappings[level][offset];
+            lpae_t old_entry = *entry;
 
             ret = apply_one_level(d, entry,
                                   level, flush_pt, op,
@@ -1078,6 +1097,10 @@ static int apply_p2m_changes(struct domain *d,
                                   mattr, t, a);
             if ( ret < 0 ) { rc = ret ; goto out; }
             count += ret;
+
+            if ( ret != P2M_ONE_PROGRESS_NOP )
+                update_reference_mapping(pages[level], old_entry, *entry);
+
             /* L3 had better have done something! We cannot descend any 
further */
             BUG_ON(level == 3 && ret == P2M_ONE_DESCEND);
             if ( ret != P2M_ONE_DESCEND ) break;
@@ -1099,6 +1122,45 @@ static int apply_p2m_changes(struct domain *d,
             }
             /* else: next level already valid */
         }
+
+        BUG_ON(level > 3);
+
+        if ( op == REMOVE )
+        {
+            for ( ; level > P2M_ROOT_LEVEL; level-- )
+            {
+                lpae_t old_entry;
+                lpae_t *entry;
+                unsigned int offset;
+
+                pg = pages[level];
+
+                /*
+                 * No need to try the previous level if the current one
+                 * still contains some mappings.
+                 */
+                if ( pg->u.inuse.p2m_refcount )
+                    break;
+
+                offset = offsets[level - 1];
+                entry = &mappings[level - 1][offset];
+                old_entry = *entry;
+
+                page_list_del(pg, &p2m->pages);
+
+                p2m_remove_pte(entry, flush_pt);
+
+                p2m->stats.mappings[level - 1]--;
+                update_reference_mapping(pages[level - 1], old_entry, *entry);
+
+                /*
+                 * We can't free the page now because it may be present
+                 * in the guest TLB. Queue it and free it after the TLB
+                 * has been flushed.
+                 */
+                page_list_add(pg, &free_pages);
+            }
+        }
     }
 
     if ( op == ALLOCATE || op == INSERT )
@@ -1116,6 +1178,9 @@ out:
         iommu_iotlb_flush(d, sgfn, egfn - sgfn);
     }
 
+    while ( (pg = page_list_remove_head(&free_pages)) )
+        free_domheap_page(pg);
+
     if ( rc < 0 && ( op == INSERT || op == ALLOCATE ) &&
          addr != start_gpaddr )
     {
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index a95082e..1427163 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -33,6 +33,12 @@ struct page_info
         struct {
             /* Type reference count and various PGT_xxx flags and fields. */
             unsigned long type_info;
+            /*
+             * Reference count for page table used in the P2M code.
+             * The counter is protected by the p2m->lock of the
+             * associated domain.
+             */
+            unsigned long p2m_refcount;
         } inuse;
         /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
         struct {
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.