
[Xen-devel] [PATCH v5 4/4] xen: rework paging_log_dirty_op to work with hvm guests



When the caller of paging_log_dirty_op is an HVM guest, Xen would choke
when trying to copy the dirty bitmap to the guest: for an HVM guest the
copy itself has to translate and map guest memory, which re-enters the
paging code while the paging lock is already held.

Fix this by independently mapping each page of the guest bitmap as needed
without the paging lock held.
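
For illustration only, a minimal standalone sketch (not Xen code) of the
copy scheme used in the patch below: the destination bitmap is only
addressable one page at a time, so the loop tracks which output page is
currently mapped and switches pages whenever the byte offset (pages >> 3)
crosses a page boundary, i.e. whenever pages >> (3 + PAGE_SHIFT) changes.
map_page() is a hypothetical stand-in for map_dirty_bitmap(), which in
Xen maps a guest page and can fail:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Stand-in for map_dirty_bitmap(): return the output page that
     * holds bit number 'pages'. */
    static uint8_t *map_page(uint8_t *bitmap, unsigned long pages)
    {
        return bitmap + ((pages >> 3) & ~(PAGE_SIZE - 1));
    }

    static void copy_bitmap(uint8_t *dst, const uint8_t *src,
                            unsigned long nr_bits)
    {
        unsigned long pages = 0, index_mapped = 0;
        uint8_t *window = map_page(dst, 0);

        while ( pages < nr_bits )
        {
            /* Copy at most one page worth of bitmap per iteration,
             * mirroring the l1-page granularity of the real loop. */
            unsigned int bytes = PAGE_SIZE;

            if ( nr_bits - pages < (unsigned long)bytes << 3 )
                bytes = (unsigned int)((nr_bits - pages + 7) >> 3);

            if ( (pages >> (3 + PAGE_SHIFT)) !=
                 (index_mapped >> (3 + PAGE_SHIFT)) )
            {
                /* Crossed an output page boundary: switch pages.  In
                 * the patch this is where the paging lock is dropped,
                 * the next guest page is mapped, and the lock retaken. */
                window = map_page(dst, pages);
                index_mapped = pages;
            }

            memcpy(window + ((pages >> 3) % PAGE_SIZE),
                   src + (pages >> 3), bytes);
            pages += (unsigned long)bytes << 3;
        }
    }

    int main(void)
    {
        static uint8_t src[3 * PAGE_SIZE], dst[3 * PAGE_SIZE];

        memset(src, 0xaa, sizeof(src));
        copy_bitmap(dst, src, sizeof(src) * 8);
        printf("match: %d\n", !memcmp(src, dst, sizeof(dst)));
        return 0;
    }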

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
Changes since v4:
 - Indent the again label.
 - Replace bogus paddr_t cast with proper type.
 - Update preempt.log_dirty before dropping the paging lock.

Changes since v3:
 - Drop last parameter from map_dirty_bitmap.
 - Drop pointless initializers in paging_log_dirty_op.
 - Add a new field to paging_domain in order to save the i2 position.
 - Move the again label up, so that we re-read the log_dirty stats after
   re-acquiring the paging lock instead of using stale cached values
   (illustrated in the sketch after this list).
 - Replace the BUG_ON in paging_log_dirty_op with an ASSERT.
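
Purely as an aside (not part of the patch), a small pthread-based sketch
of the rule behind the again item above: values read under a lock must be
re-read after the lock has been dropped and re-acquired, which is why the
again label sits before the stats are (re-)read.  All names here
(shared_stat, need_remap, consume) are hypothetical:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int shared_stat;            /* stand-in for log_dirty stats */

    static void consume(int chunks)
    {
        int snapshot, need_remap;

        pthread_mutex_lock(&lock);
     again:
        /* (Re-)read shared state only while the lock is held. */
        snapshot = shared_stat;

        while ( chunks-- > 0 )
        {
            need_remap = chunks & 1;   /* stand-in for a page crossing */
            if ( need_remap )
            {
                pthread_mutex_unlock(&lock);
                /* ... map the next output page without the lock ... */
                pthread_mutex_lock(&lock);
                /* 'snapshot' may now be stale: start over. */
                goto again;
            }
            /* ... process one chunk under the lock ... */
        }
        pthread_mutex_unlock(&lock);
        printf("final snapshot: %d\n", snapshot);
    }

    int main(void)
    {
        consume(4);
        return 0;
    }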

Changes since v2:
 - Add checks for p2m_is_ram and p2m_is_discard_write when mapping a guest
   page.
 - Remove error checking from memset/memcpy; they unconditionally return
   dst.
---
 xen/arch/x86/mm/paging.c     | 104 ++++++++++++++++++++++++++++++++++++++-----
 xen/include/asm-x86/domain.h |   1 +
 2 files changed, 94 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 59d4720..aed68f2 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -408,6 +408,53 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
     return rv;
 }
 
+static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
+                                     unsigned long pages,
+                                     struct page_info **page)
+{
+    uint32_t pfec = PFEC_page_present | PFEC_write_access;
+    unsigned long gfn;
+    p2m_type_t p2mt;
+
+    gfn = paging_gva_to_gfn(current,
+                            (unsigned long)(dirty_bitmap.p + (pages >> 3)),
+                            &pfec);
+    if ( gfn == INVALID_GFN )
+        return NULL;
+
+    *page = get_page_from_gfn(current->domain, gfn, &p2mt, P2M_UNSHARE);
+    if ( *page == NULL )
+        return NULL;
+
+    if ( !p2m_is_ram(p2mt) )
+    {
+        put_page(*page);
+        return NULL;
+    }
+    if ( p2m_is_paging(p2mt) )
+    {
+        put_page(*page);
+        p2m_mem_paging_populate(current->domain, gfn);
+        return NULL;
+    }
+    if ( p2m_is_shared(p2mt) || p2m_is_discard_write(p2mt) )
+    {
+        put_page(*page);
+        return NULL;
+    }
+
+    return __map_domain_page(*page);
+}
+
+static inline void unmap_dirty_bitmap(void *addr, struct page_info *page)
+{
+    if ( addr != NULL )
+    {
+        unmap_domain_page(addr);
+        put_page(page);
+    }
+}
+
 
 /* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
  * clear the bitmap and stats as well. */
@@ -420,6 +467,9 @@ static int paging_log_dirty_op(struct domain *d,
     mfn_t *l4 = NULL, *l3 = NULL, *l2 = NULL;
     unsigned long *l1 = NULL;
     int i4, i3, i2;
+    uint8_t *dirty_bitmap;
+    struct page_info *page;
+    unsigned long index_mapped;
 
     if ( !resuming )
     {
@@ -433,6 +483,14 @@ static int paging_log_dirty_op(struct domain *d,
         p2m_flush_hardware_cached_dirty(d);
     }
 
+    index_mapped = resuming ? d->arch.paging.preempt.log_dirty.done : 0;
+    dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, index_mapped, &page);
+    if ( dirty_bitmap == NULL )
+    {
+        domain_unpause(d);
+        return -EFAULT;
+    }
+
     paging_lock(d);
 
     if ( !d->arch.paging.preempt.dom )
@@ -455,6 +513,7 @@ static int paging_log_dirty_op(struct domain *d,
                  d->arch.paging.log_dirty.fault_count,
                  d->arch.paging.log_dirty.dirty_count);
 
+ again:
     sc->stats.fault_count = d->arch.paging.log_dirty.fault_count;
     sc->stats.dirty_count = d->arch.paging.log_dirty.dirty_count;
 
@@ -472,18 +531,18 @@ static int paging_log_dirty_op(struct domain *d,
     l4 = paging_map_log_dirty_bitmap(d);
     i4 = d->arch.paging.preempt.log_dirty.i4;
     i3 = d->arch.paging.preempt.log_dirty.i3;
+    i2 = d->arch.paging.preempt.log_dirty.i2;
     pages = d->arch.paging.preempt.log_dirty.done;
 
     for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
     {
         l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
-        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
+        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+             i3++, i2 = 0 )
         {
             l2 = ((l3 && mfn_valid(l3[i3])) ?
                   map_domain_page(mfn_x(l3[i3])) : NULL);
-            for ( i2 = 0;
-                  (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
-                  i2++ )
+            for ( ; (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); i2++ )
             {
                 unsigned int bytes = PAGE_SIZE;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
@@ -492,15 +551,34 @@ static int paging_log_dirty_op(struct domain *d,
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
                 if ( likely(peek) )
                 {
-                    if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap,
-                                                    pages >> 3, (uint8_t *)l1,
-                                                    bytes)
-                             : clear_guest_offset(sc->dirty_bitmap,
-                                                  pages >> 3, bytes)) != 0 )
+                    if ( pages >> (3 + PAGE_SHIFT) !=
+                         index_mapped >> (3 + PAGE_SHIFT) )
                     {
-                        rv = -EFAULT;
-                        goto out;
+                        /* We need to map the next page. */
+                        d->arch.paging.preempt.log_dirty.i4 = i4;
+                        d->arch.paging.preempt.log_dirty.i3 = i3;
+                        d->arch.paging.preempt.log_dirty.i2 = i2;
+                        d->arch.paging.preempt.log_dirty.done = pages;
+                        paging_unlock(d);
+                        unmap_dirty_bitmap(dirty_bitmap, page);
+                        index_mapped = pages;
+                        dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, pages,
+                                                        &page);
+                        paging_lock(d);
+                        if ( dirty_bitmap == NULL )
+                        {
+                            rv = -EFAULT;
+                            goto out;
+                        }
+                        goto again;
                     }
+                    ASSERT(((pages >> 3) % PAGE_SIZE) + bytes <= PAGE_SIZE);
+                    if ( l1 )
+                        memcpy(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), l1,
+                               bytes);
+                    else
+                        memset(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), 0,
+                               bytes);
                 }
                 pages += bytes << 3;
                 if ( l1 )
@@ -517,6 +595,7 @@ static int paging_log_dirty_op(struct domain *d,
             {
                 d->arch.paging.preempt.log_dirty.i4 = i4;
                 d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
+                d->arch.paging.preempt.log_dirty.i2 = 0;
                 rv = -ERESTART;
                 break;
             }
@@ -529,6 +608,7 @@ static int paging_log_dirty_op(struct domain *d,
         {
             d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
             d->arch.paging.preempt.log_dirty.i3 = 0;
+            d->arch.paging.preempt.log_dirty.i2 = 0;
             rv = -ERESTART;
         }
         if ( rv )
@@ -570,12 +650,14 @@ static int paging_log_dirty_op(struct domain *d,
          * paging modes (shadow or hap).  Safe because the domain is paused. */
         d->arch.paging.log_dirty.clean_dirty_bitmap(d);
     }
+    unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
     return rv;
 
  out:
     d->arch.paging.preempt.dom = NULL;
     paging_unlock(d);
+    unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
 
     if ( l1 )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 3f83e8b..e364a2c 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -202,6 +202,7 @@ struct paging_domain {
                 unsigned long done:PADDR_BITS - PAGE_SHIFT;
                 unsigned long i4:PAGETABLE_ORDER;
                 unsigned long i3:PAGETABLE_ORDER;
+                unsigned long i2:PAGETABLE_ORDER;
             } log_dirty;
         };
     } preempt;
-- 
1.9.5 (Apple Git-50.3)

