[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2 3/4] xen: introduce new function update_dirty_bitmap



Introduce a new log_dirty function, update_dirty_bitmap, which only updates the
log-dirty bitmap but does not clear the EPT page dirty bit. The function is used
by the live-migration peek round when the EPT D bit is supported.

Set correct p2m type when EPT dirty bit supported.

Signed-off-by: Xudong Hao <xudong.hao@xxxxxxxxx>
Signed-off-by: Haitao Shan <haitao.shan@xxxxxxxxx>
---
 xen/arch/x86/mm/hap/hap.c       |   32 +++++++++++++++++++++++++++-----
 xen/arch/x86/mm/p2m-pt.c        |    1 +
 xen/arch/x86/mm/p2m.c           |    9 +++++++++
 xen/arch/x86/mm/paging.c        |   32 +++++++++++++++++++-------------
 xen/arch/x86/mm/shadow/common.c |    2 +-
 xen/include/asm-x86/domain.h    |    1 +
 xen/include/asm-x86/hap.h       |    3 +++
 xen/include/asm-x86/p2m.h       |    5 ++++-
 xen/include/asm-x86/paging.h    |    3 ++-
 9 files changed, 67 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 8790c58..2a4cf1d 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -76,6 +76,7 @@ static int hap_enable_vram_tracking(struct domain *d)
     p2m_change_type_range(d, dirty_vram->begin_pfn, dirty_vram->end_pfn, 
                           p2m_ram_rw, p2m_ram_logdirty);
 
+    /* A TLB flush is needed no matter whether hap dirty bit is supported */
     flush_tlb_mask(d->domain_dirty_cpumask);
     return 0;
 }
@@ -83,19 +84,31 @@ static int hap_enable_vram_tracking(struct domain *d)
 static int hap_disable_vram_tracking(struct domain *d)
 {
     struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+    p2m_type_t p2mt = p2m_ram_rw;
 
     if ( !dirty_vram )
         return -EINVAL;
 
+    /* With the hap dirty bit, the p2m type cannot be changed from
+     * p2m_ram_logdirty to p2m_ram_rw when the first fault is met;
+     * in fact, no such fault occurs at all.
+     */
+    if ( hap_has_dirty_bit )
+        p2mt = p2m_ram_logdirty;
+
     paging_lock(d);
     d->arch.paging.mode &= ~PG_log_dirty;
     paging_unlock(d);
 
     /* set l1e entries of P2M table with normal mode */
     p2m_change_type_range(d, dirty_vram->begin_pfn, dirty_vram->end_pfn, 
-                          p2m_ram_logdirty, p2m_ram_rw);
+                          p2mt, p2m_ram_rw);
 
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    /* With the hap dirty bit, we did not actually change any HW-sensitive
+     * bits of the P2M tables.
+     */
+    if ( !hap_has_dirty_bit )
+        flush_tlb_mask(d->domain_dirty_cpumask);
     return 0;
 }
 
@@ -117,7 +130,7 @@ static void hap_vram_tracking_init(struct domain *d)
 {
     paging_log_dirty_init(d, hap_enable_vram_tracking,
                           hap_disable_vram_tracking,
-                          hap_clean_vram_tracking);
+                          hap_clean_vram_tracking, NULL);
 }
 
 int hap_track_dirty_vram(struct domain *d,
@@ -220,8 +233,16 @@ static int hap_disable_log_dirty(struct domain *d)
 
 static void hap_clean_dirty_bitmap(struct domain *d)
 {
+    p2m_type_t p2mt = (hap_has_dirty_bit)? p2m_ram_logdirty : p2m_ram_rw;
     /* set l1e entries of P2M table to be read-only. */
-    p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
+    p2m_change_entry_type_global(d, p2mt, p2m_ram_logdirty);
+    flush_tlb_mask(d->domain_dirty_cpumask);
+}
+
+static void hap_update_dirty_bitmap(struct domain *d)
+{
+    /* Find dirty pages by walking the EPT tables and update the dirty bitmap. */
+    p2m_query_entry_global(d, WALK_EPT_D);
     flush_tlb_mask(d->domain_dirty_cpumask);
 }
 
@@ -238,7 +259,8 @@ void hap_logdirty_init(struct domain *d)
     /* Reinitialize logdirty mechanism */
     paging_log_dirty_init(d, hap_enable_log_dirty,
                           hap_disable_log_dirty,
-                          hap_clean_dirty_bitmap);
+                          hap_clean_dirty_bitmap,
+                          hap_update_dirty_bitmap);
 }
 
 /************************************************/
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index c97cac4..7334167 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -1141,6 +1141,7 @@ void p2m_pt_init(struct p2m_domain *p2m)
     p2m->set_entry = p2m_set_entry;
     p2m->get_entry = p2m_gfn_to_mfn;
     p2m->change_entry_type_global = p2m_change_type_global;
+    p2m->query_entry_global = NULL;
     p2m->write_p2m_entry = paging_write_p2m_entry;
 #if P2M_AUDIT
     p2m->audit_p2m = p2m_pt_audit_p2m;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0a796f3..26b1ca7 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -148,6 +148,15 @@ void p2m_change_entry_type_global(struct domain *d,
     p2m_unlock(p2m);
 }
 
+void p2m_query_entry_global(struct domain *d, int mask)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    ASSERT(p2m);
+    p2m_lock(p2m);
+    p2m->query_entry_global(p2m, mask);
+    p2m_unlock(p2m);
+}
+
 mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
                     p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                     unsigned int *page_order, bool_t locked)
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index ca879f9..12ee552 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -335,9 +335,24 @@ int paging_log_dirty_op(struct domain *d, struct 
xen_domctl_shadow_op *sc)
     int i4, i3, i2;
 
     domain_pause(d);
-    paging_lock(d);
 
     clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
+    if ( clean )
+    {
+        /* We need to further call the clean_dirty_bitmap() function of the
+         * specific paging mode (shadow or hap).  Safe because the domain is
+         * paused.  This call must be made before actually transferring the
+         * dirty bitmap since, with HW hap dirty bit support, the dirty
+         * bitmap is produced by hooking on this call. */
+        d->arch.paging.log_dirty.clean_dirty_bitmap(d);
+    }
+
+    if ( peek && d->arch.paging.log_dirty.update_dirty_bitmap)
+    {
+        d->arch.paging.log_dirty.update_dirty_bitmap(d);
+    }
+
+    paging_lock(d);
 
     PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
                  (clean) ? "clean" : "peek",
@@ -420,17 +435,6 @@ int paging_log_dirty_op(struct domain *d, struct 
xen_domctl_shadow_op *sc)
     if ( pages < sc->pages )
         sc->pages = pages;
 
-    paging_unlock(d);
-
-    if ( clean )
-    {
-        /* We need to further call clean_dirty_bitmap() functions of specific
-         * paging modes (shadow or hap).  Safe because the domain is paused. */
-        d->arch.paging.log_dirty.clean_dirty_bitmap(d);
-    }
-    domain_unpause(d);
-    return rv;
-
  out:
     paging_unlock(d);
     domain_unpause(d);
@@ -600,11 +604,13 @@ int paging_log_dirty_range(struct domain *d,
 void paging_log_dirty_init(struct domain *d,
                            int    (*enable_log_dirty)(struct domain *d),
                            int    (*disable_log_dirty)(struct domain *d),
-                           void   (*clean_dirty_bitmap)(struct domain *d))
+                           void   (*clean_dirty_bitmap)(struct domain *d),
+                           void   (*update_dirty_bitmap)(struct domain *d))
 {
     d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
     d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
     d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
+    d->arch.paging.log_dirty.update_dirty_bitmap = update_dirty_bitmap;
 }
 
 /* This function fress log dirty bitmap resources. */
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index dc245be..f4e7566 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -50,7 +50,7 @@ void shadow_domain_init(struct domain *d, unsigned int 
domcr_flags)
 
     /* Use shadow pagetables for log-dirty support */
     paging_log_dirty_init(d, shadow_enable_log_dirty, 
-                          shadow_disable_log_dirty, shadow_clean_dirty_bitmap);
+                          shadow_disable_log_dirty, shadow_clean_dirty_bitmap, 
NULL);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     d->arch.paging.shadow.oos_active = 0;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index aecee68..22cd0f9 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -179,6 +179,7 @@ struct log_dirty_domain {
     int            (*enable_log_dirty   )(struct domain *d);
     int            (*disable_log_dirty  )(struct domain *d);
     void           (*clean_dirty_bitmap )(struct domain *d);
+    void           (*update_dirty_bitmap )(struct domain *d);
 };
 
 struct paging_domain {
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index bd5d732..fb43b27 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -31,6 +31,9 @@
 #define HAP_ERROR(_f, _a...)                                          \
     printk("hap error: %s(): " _f, __func__, ##_a)
 
+#define WALK_EPT_UNUSED    0
+#define WALK_EPT_D         2
+
 /************************************************/
 /*          hap domain page mapping             */
 /************************************************/
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 63bc7cf..21ed9a5 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -249,7 +249,7 @@ struct p2m_domain {
     void               (*change_entry_type_global)(struct p2m_domain *p2m,
                                                    p2m_type_t ot,
                                                    p2m_type_t nt);
-    
+    void               (*query_entry_global)(struct p2m_domain *p2m, int mask);
     void               (*write_p2m_entry)(struct p2m_domain *p2m,
                                           unsigned long gfn, l1_pgentry_t *p,
                                           mfn_t table_mfn, l1_pgentry_t new,
@@ -506,6 +506,9 @@ int guest_physmap_mark_populate_on_demand(struct domain *d, 
unsigned long gfn,
 void p2m_change_entry_type_global(struct domain *d, 
                                   p2m_type_t ot, p2m_type_t nt);
 
+/* Query across all p2m entries in a domain */
+void p2m_query_entry_global(struct domain *d, int mask);
+
 /* Change types across a range of p2m entries (start ... end-1) */
 void p2m_change_type_range(struct domain *d, 
                            unsigned long start, unsigned long end,
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index c432a97..9d95e51 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -160,7 +160,8 @@ int paging_log_dirty_disable(struct domain *d);
 void paging_log_dirty_init(struct domain *d,
                            int  (*enable_log_dirty)(struct domain *d),
                            int  (*disable_log_dirty)(struct domain *d),
-                           void (*clean_dirty_bitmap)(struct domain *d));
+                           void (*clean_dirty_bitmap)(struct domain *d),
+                           void (*update_dirty_bitmap)(struct domain *d));
 
 /* mark a page as dirty */
 void paging_mark_dirty(struct domain *d, unsigned long guest_mfn);
-- 
1.5.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.