
[Xen-changelog] [xen master] arch/arm: unmap partially-mapped memory regions



commit 4b25423aee137f33f136f539af2d85c7df2b7566
Author:     Arianna Avanzini <avanzini.arianna@xxxxxxxxx>
AuthorDate: Tue Sep 2 01:47:34 2014 +0200
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Wed Sep 3 12:49:37 2014 +0100

    arch/arm: unmap partially-mapped memory regions
    
    This commit modifies apply_p2m_changes() so that, if an error occurs
    while mapping a memory region, the changes performed so far are
    destroyed. Only mappings created during the current invocation of
    apply_p2m_changes() are torn down; this prevents memory regions from
    remaining partially accessible to guests.
    
    Signed-off-by: Arianna Avanzini <avanzini.arianna@xxxxxxxxx>
    Cc: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
    Cc: Paolo Valente <paolo.valente@xxxxxxxxxx>
    Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
    Cc: Julien Grall <julien.grall@xxxxxxxxxx>
    Cc: Ian Campbell <Ian.Campbell@xxxxxxxxxxxxx>
    Cc: Jan Beulich <jbeulich@xxxxxxxx>
    Cc: Keir Fraser <keir@xxxxxxx>
    Cc: Tim Deegan <tim@xxxxxxx>
    Cc: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
    Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Cc: Eric Trudeau <etrudeau@xxxxxxxxxxxx>
    Cc: Viktor Kleinik <viktor.kleinik@xxxxxxxxxxxxxxx>
    Cc: Andrii Tseglytskyi <andrii.tseglytskyi@xxxxxxxxxxxxxxx>
    Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/arch/arm/p2m.c |   41 ++++++++++++++++++++++++++++++-----------
 1 file changed, 30 insertions(+), 11 deletions(-)
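
For context, the patch follows the usual unwind-on-error idiom: remember
where the operation started, and on failure remove exactly what this
invocation managed to insert. The sketch below is illustrative only; the
helpers do_map_one() and undo_range() are hypothetical stand-ins for
apply_one_level() and the REMOVE path of apply_p2m_changes(), and the
address range is end-exclusive as in the real code.

    /* Minimal sketch of the unwind-on-error pattern; not Xen code.
     * do_map_one() and undo_range() are hypothetical helpers. */
    typedef unsigned long long paddr_t;
    #define PAGE_SIZE 0x1000ULL

    int do_map_one(paddr_t gpaddr);              /* < 0 on failure */
    void undo_range(paddr_t start, paddr_t end); /* end exclusive  */

    static int map_region(paddr_t start, paddr_t end)
    {
        paddr_t addr;

        for ( addr = start; addr < end; addr += PAGE_SIZE )
        {
            int ret = do_map_one(addr);

            if ( ret < 0 )
            {
                /* Tear down only the mappings created by this
                 * invocation: the end-exclusive range [start, addr). */
                if ( addr != start )
                    undo_range(start, addr);
                return ret;
            }
        }
        return 0;
    }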

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 8f83d17..ede839d 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -440,6 +440,14 @@ static bool_t is_mapping_aligned(const paddr_t start_gpaddr,
 #define P2M_ONE_PROGRESS_NOP   0x1
 #define P2M_ONE_PROGRESS       0x10
 
+/* Helpers to lookup the properties of each level */
+static const paddr_t level_sizes[] =
+    { ZEROETH_SIZE, FIRST_SIZE, SECOND_SIZE, THIRD_SIZE };
+static const paddr_t level_masks[] =
+    { ZEROETH_MASK, FIRST_MASK, SECOND_MASK, THIRD_MASK };
+static const paddr_t level_shifts[] =
+    { ZEROETH_SHIFT, FIRST_SHIFT, SECOND_SHIFT, THIRD_SHIFT };
+
 /*
  * 0   == (P2M_ONE_DESCEND) continue to descend the tree
  * +ve == (P2M_ONE_PROGRESS_*) handled at this level, continue, flush,
@@ -460,13 +468,6 @@ static int apply_one_level(struct domain *d,
                            int mattr,
                            p2m_type_t t)
 {
-    /* Helpers to lookup the properties of each level */
-    const paddr_t level_sizes[] =
-        { ZEROETH_SIZE, FIRST_SIZE, SECOND_SIZE, THIRD_SIZE };
-    const paddr_t level_masks[] =
-        { ZEROETH_MASK, FIRST_MASK, SECOND_MASK, THIRD_MASK };
-    const paddr_t level_shifts[] =
-        { ZEROETH_SHIFT, FIRST_SHIFT, SECOND_SHIFT, THIRD_SHIFT };
     const paddr_t level_size = level_sizes[level];
     const paddr_t level_mask = level_masks[level];
     const paddr_t level_shift = level_shifts[level];
@@ -713,7 +714,8 @@ static int apply_p2m_changes(struct domain *d,
     int rc, ret;
     struct p2m_domain *p2m = &d->arch.p2m;
     lpae_t *first = NULL, *second = NULL, *third = NULL;
-    paddr_t addr;
+    paddr_t addr, orig_maddr = maddr;
+    unsigned int level = 0;
     unsigned long cur_first_page = ~0,
                   cur_first_offset = ~0,
                   cur_second_offset = ~0;
@@ -769,8 +771,9 @@ static int apply_p2m_changes(struct domain *d,
          * current hardware doesn't support super page mappings at
          * level 0 anyway */
 
+        level = 1;
         ret = apply_one_level(d, &first[first_table_offset(addr)],
-                              1, flush_pt, op,
+                              level, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
                               mattr, t);
@@ -790,8 +793,9 @@ static int apply_p2m_changes(struct domain *d,
         }
         /* else: second already valid */
 
+        level = 2;
         ret = apply_one_level(d,&second[second_table_offset(addr)],
-                              2, flush_pt, op,
+                              level, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
                               mattr, t);
@@ -809,8 +813,9 @@ static int apply_p2m_changes(struct domain *d,
             cur_second_offset = second_table_offset(addr);
         }
 
+        level = 3;
         ret = apply_one_level(d, &third[third_table_offset(addr)],
-                              3, flush_pt, op,
+                              level, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
                               mattr, t);
@@ -844,6 +849,20 @@ out:
     if (third) unmap_domain_page(third);
     if (second) unmap_domain_page(second);
     if (first) unmap_domain_page(first);
+    if ( rc < 0 && ( op == INSERT || op == ALLOCATE ) &&
+         addr != start_gpaddr )
+    {
+        BUG_ON(addr == end_gpaddr);
+        /*
+         * addr keeps the address of the last successfully-inserted mapping,
+         * while apply_p2m_changes() considers an address range which is
+         * exclusive of end_gpaddr: add level_size to addr to obtain the
+         * right end of the range
+         */
+        apply_p2m_changes(d, REMOVE,
+                          start_gpaddr, addr + level_sizes[level], orig_maddr,
+                          mattr, p2m_invalid);
+    }
 
     spin_unlock(&p2m->lock);
 
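A note on the removal bound in the error path above: addr holds the start
of the last successfully-inserted mapping, while apply_p2m_changes()
treats its range as exclusive of the end address, so the cleanup must
extend one mapping past addr. A worked example with hypothetical
addresses, assuming the failure happened at level 3, where
level_sizes[3] == THIRD_SIZE == 0x1000 (a 4 KiB page):

    /* Hypothetical numbers for illustration only.                  */
    /* start_gpaddr            = 0x40000000                         */
    /* last successful mapping = 0x40003000 (addr)                  */
    /* level == 3, so level_sizes[level] == 0x1000                  */
    /* removal range = [0x40000000, 0x40003000 + 0x1000)            */
    /*               = [0x40000000, 0x40004000), which covers every */
    /* mapping created by this invocation and nothing beyond it.    */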
--
generated by git-patchbot for /home/xen/git/xen.git#master
