
[Xen-changelog] [xen master] Revert "x86: rework paging_log_dirty_op to work with hvm guests"



commit 0d7a599afff0665c74f328f6af85e556688d7908
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon May 18 12:34:44 2015 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon May 18 12:34:44 2015 +0200

    Revert "x86: rework paging_log_dirty_op to work with hvm guests"
    
    This reverts commit a809eeea06d20b115d78f12e473502bcb6209844, as it
    breaks PV log dirty mode handling.
---
 xen/arch/x86/mm/paging.c     |   97 +++++-------------------------------------
 xen/include/asm-x86/domain.h |    1 -
 2 files changed, 11 insertions(+), 87 deletions(-)

diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 5eee88c..59d4720 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -408,51 +408,6 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
     return rv;
 }
 
-static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
-                                     unsigned long pages,
-                                     struct page_info **page)
-{
-    uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    unsigned long gfn;
-    p2m_type_t p2mt;
-
-    gfn = paging_gva_to_gfn(current,
-                            (unsigned long)(dirty_bitmap.p + (pages >> 3)),
-                            &pfec);
-    if ( gfn == INVALID_GFN )
-        return NULL;
-
-    *page = get_page_from_gfn(current->domain, gfn, &p2mt, P2M_UNSHARE);
-
-    if ( !p2m_is_ram(p2mt) )
-    {
-        put_page(*page);
-        return NULL;
-    }
-    if ( p2m_is_paging(p2mt) )
-    {
-        put_page(*page);
-        p2m_mem_paging_populate(current->domain, gfn);
-        return NULL;
-    }
-    if ( p2m_is_shared(p2mt) || p2m_is_discard_write(p2mt) )
-    {
-        put_page(*page);
-        return NULL;
-    }
-
-    return __map_domain_page(*page);
-}
-
-static inline void unmap_dirty_bitmap(void *addr, struct page_info *page)
-{
-    if ( addr != NULL )
-    {
-        unmap_domain_page(addr);
-        put_page(page);
-    }
-}
-
 
 /* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
  * clear the bitmap and stats as well. */
@@ -465,11 +420,7 @@ static int paging_log_dirty_op(struct domain *d,
     mfn_t *l4 = NULL, *l3 = NULL, *l2 = NULL;
     unsigned long *l1 = NULL;
     int i4, i3, i2;
-    uint8_t *dirty_bitmap;
-    struct page_info *page;
-    unsigned long index_mapped;
 
- again:
     if ( !resuming )
     {
         domain_pause(d);
@@ -482,14 +433,6 @@ static int paging_log_dirty_op(struct domain *d,
         p2m_flush_hardware_cached_dirty(d);
     }
 
-    index_mapped = resuming ? d->arch.paging.preempt.log_dirty.done : 0;
-    dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, index_mapped, &page);
-    if ( dirty_bitmap == NULL )
-    {
-        domain_unpause(d);
-        return -EFAULT;
-    }
-
     paging_lock(d);
 
     if ( !d->arch.paging.preempt.dom )
@@ -529,18 +472,18 @@ static int paging_log_dirty_op(struct domain *d,
     l4 = paging_map_log_dirty_bitmap(d);
     i4 = d->arch.paging.preempt.log_dirty.i4;
     i3 = d->arch.paging.preempt.log_dirty.i3;
-    i2 = d->arch.paging.preempt.log_dirty.i2;
     pages = d->arch.paging.preempt.log_dirty.done;
 
     for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
     {
         l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
-        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
-             i3++, i2 = 0 )
+        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
         {
             l2 = ((l3 && mfn_valid(l3[i3])) ?
                   map_domain_page(mfn_x(l3[i3])) : NULL);
-            for ( ; (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); i2++ )
+            for ( i2 = 0;
+                  (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
+                  i2++ )
             {
                 unsigned int bytes = PAGE_SIZE;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
@@ -549,28 +492,15 @@ static int paging_log_dirty_op(struct domain *d,
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
                 if ( likely(peek) )
                 {
-                    if ( pages >> (3 + PAGE_SHIFT) !=
-                         index_mapped >> (3 + PAGE_SHIFT) )
+                    if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap,
+                                                    pages >> 3, (uint8_t *)l1,
+                                                    bytes)
+                             : clear_guest_offset(sc->dirty_bitmap,
+                                                  pages >> 3, bytes)) != 0 )
                     {
-                        /* We need to map next page */
-                        d->arch.paging.preempt.log_dirty.i4 = i4;
-                        d->arch.paging.preempt.log_dirty.i3 = i3;
-                        d->arch.paging.preempt.log_dirty.i2 = i2;
-                        d->arch.paging.preempt.log_dirty.done = pages;
-                        d->arch.paging.preempt.dom = current->domain;
-                        d->arch.paging.preempt.op = sc->op;
-                        resuming = 1;
-                        paging_unlock(d);
-                        unmap_dirty_bitmap(dirty_bitmap, page);
-                        goto again;
+                        rv = -EFAULT;
+                        goto out;
                     }
-                    ASSERT(((pages >> 3) % PAGE_SIZE) + bytes <= PAGE_SIZE);
-                    if ( l1 )
-                        memcpy(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), l1,
-                               bytes);
-                    else
-                        memset(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), 0,
-                               bytes);
                 }
                 pages += bytes << 3;
                 if ( l1 )
@@ -587,7 +517,6 @@ static int paging_log_dirty_op(struct domain *d,
             {
                 d->arch.paging.preempt.log_dirty.i4 = i4;
                 d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
-                d->arch.paging.preempt.log_dirty.i2 = 0;
                 rv = -ERESTART;
                 break;
             }
@@ -600,7 +529,6 @@ static int paging_log_dirty_op(struct domain *d,
         {
             d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
             d->arch.paging.preempt.log_dirty.i3 = 0;
-            d->arch.paging.preempt.log_dirty.i2 = 0;
             rv = -ERESTART;
         }
         if ( rv )
@@ -630,7 +558,6 @@ static int paging_log_dirty_op(struct domain *d,
     if ( rv )
     {
         /* Never leave the domain paused on real errors. */
-        unmap_dirty_bitmap(dirty_bitmap, page);
         ASSERT(rv == -ERESTART);
         return rv;
     }
@@ -643,14 +570,12 @@ static int paging_log_dirty_op(struct domain *d,
          * paging modes (shadow or hap).  Safe because the domain is paused. */
         d->arch.paging.log_dirty.clean_dirty_bitmap(d);
     }
-    unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
     return rv;
 
  out:
     d->arch.paging.preempt.dom = NULL;
     paging_unlock(d);
-    unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
 
     if ( l1 )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index f4375c2..45b5283 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -204,7 +204,6 @@ struct paging_domain {
                 unsigned long done:PADDR_BITS - PAGE_SHIFT;
                 unsigned long i4:PAGETABLE_ORDER;
                 unsigned long i3:PAGETABLE_ORDER;
-                unsigned long i2:PAGETABLE_ORDER;
             } log_dirty;
         };
     } preempt;
--
generated by git-patchbot for /home/xen/git/xen.git#master
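
For context on the restored hunks: the reverted commit had replaced the
copy_to_guest_offset()/clear_guest_offset() calls with a
map_dirty_bitmap() helper that translated the caller's bitmap address
via paging_gva_to_gfn(current, ...) and memcpy'd each chunk into a
directly mapped page. The revert goes back to letting the guest-access
layer do the copy, which (per the commit message) is what PV log-dirty
handling needs. The standalone sketch below only illustrates the
restored loop's bitmap arithmetic (one bit per guest page, so
"pages >> 3" is the byte offset into the caller's buffer, and the tail
chunk is "(sc->pages - pages + 7) >> 3" bytes); walk_bitmap(), its
parameter, and the 4 KiB PAGE_SIZE are illustrative assumptions, not
part of the patch:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /*
     * Hypothetical walk over a log-dirty bitmap, mirroring the restored
     * loop's arithmetic: each full chunk covers PAGE_SIZE bytes of
     * bitmap (PAGE_SIZE * 8 guest pages); the last chunk is the
     * remaining bits rounded up to whole bytes.
     */
    static void walk_bitmap(unsigned long total_pages)
    {
        unsigned long pages = 0; /* guest pages accounted for so far */

        while ( pages < total_pages )
        {
            unsigned long bytes = PAGE_SIZE;

            if ( ((total_pages - pages + 7) >> 3) < bytes )
                bytes = (total_pages - pages + 7) >> 3;

            /*
             * This is the point where the restored code calls
             * copy_to_guest_offset(sc->dirty_bitmap, pages >> 3, ...,
             * bytes), or clear_guest_offset() when no L1 page exists;
             * the reverted helper instead memcpy'd into a page it had
             * mapped itself.
             */
            printf("copy %lu byte(s) at byte offset %lu\n",
                   bytes, pages >> 3);

            pages += bytes << 3; /* 8 guest pages per bitmap byte */
        }
    }

    int main(void)
    {
        walk_bitmap(123456); /* an arbitrary page count for illustration */
        return 0;
    }

As in the real loop, the final "pages += bytes << 3" may step past the
requested count by up to seven pages when the tail is rounded up to a
whole byte; that only serves to terminate the loop.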

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog