
[Xen-devel] [PATCH 2/2] x86/paging: Rename paging_mark_pfn_dirty() and use pfn_t



paging_mark_gfn_dirty() actually takes a pfn, even by parameter name.  Rename
the function and alter the type to pfn_t to match.

Push pfn_t into the LOGDIRTY_IDX() macros, and clean up a couple of local
variable types in paging_mark_pfn_dirty().

Leave an explicit comment in vmx_vcpu_flush_pml_buffer() where we intentionally
perform a straight conversion from gfn to pfn.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
---
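
For reviewers less familiar with the typed-frame-number wrappers, here is a
minimal sketch of the pattern that _pfn()/pfn_x() follow.  It is modelled on
Xen's TYPE_SAFE() machinery; the struct-wrapper form shown is the debug-build
variant and is included purely for illustration:

    /*
     * Illustrative only: wrapping the integer in a struct gives the
     * compiler a distinct type for PFNs, so passing a raw unsigned long
     * (or an mfn_t) where a pfn_t is expected becomes a build error
     * rather than a silent bug.
     */
    typedef struct { unsigned long pfn; } pfn_t;

    static inline pfn_t _pfn(unsigned long n) { return (pfn_t){ .pfn = n }; }
    static inline unsigned long pfn_x(pfn_t p) { return p.pfn; }
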
 xen/arch/x86/hvm/vmx/vmcs.c  |  4 +++-
 xen/arch/x86/mm/paging.c     | 26 +++++++++++++-------------
 xen/include/asm-x86/paging.h | 10 +++++-----
 3 files changed, 21 insertions(+), 19 deletions(-)
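
As a quick sanity check on the reworked LOGDIRTY_IDX() macros, the following
standalone snippet performs the same index decomposition (it assumes x86's
PAGE_SHIFT of 12 and PAGETABLE_ORDER of 9, and all names are local to the
example rather than taken from the tree):

    #include <stdio.h>

    #define PAGE_SHIFT       12
    #define PAGETABLE_ORDER   9
    #define NODE_ENTRIES     (1u << PAGETABLE_ORDER)

    int main(void)
    {
        unsigned long pfn = 0x1234567UL;

        /* Bit index within one 4k leaf bitmap page: 4096 * 8 = 2^15 bits. */
        unsigned int i1 = pfn & ((1 << (PAGE_SHIFT + 3)) - 1);

        /* Each further level consumes another PAGETABLE_ORDER bits. */
        unsigned int i2 = (pfn >> (PAGE_SHIFT + 3)) & (NODE_ENTRIES - 1);
        unsigned int i3 = (pfn >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER)) &
                          (NODE_ENTRIES - 1);
        unsigned int i4 = (pfn >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) &
                          (NODE_ENTRIES - 1);

        printf("pfn %#lx -> i4 %u, i3 %u, i2 %u, i1 %u\n", pfn, i4, i3, i2, i1);

        return 0;
    }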

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 0995496..5db5fea 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1484,7 +1484,9 @@ void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
          * is extremely difficult to debug.
          */
         p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
-        paging_mark_gfn_dirty(v->domain, gfn);
+
+        /* HVM guest: pfn == gfn */
+        paging_mark_pfn_dirty(v->domain, _pfn(gfn));
     }
 
     unmap_domain_page(pml_buf);
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 3a66098..0f3d885 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -265,25 +265,25 @@ static int paging_log_dirty_disable(struct domain *d, bool_t resuming)
 }
 
 /* Mark a page as dirty, with taking guest pfn as parameter */
-void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
+void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
 {
-    int changed;
+    bool changed;
     mfn_t mfn, *l4, *l3, *l2;
     unsigned long *l1;
-    int i1, i2, i3, i4;
+    unsigned int i1, i2, i3, i4;
 
     if ( !paging_mode_log_dirty(d) )
         return;
 
     /* Shared MFNs should NEVER be marked dirty */
-    BUG_ON(SHARED_M2P(pfn));
+    BUG_ON(SHARED_M2P(pfn_x(pfn)));
 
     /*
      * Values with the MSB set denote MFNs that aren't really part of the
      * domain's pseudo-physical memory map (e.g., the shared info frame).
      * Nothing to do here...
      */
-    if ( unlikely(!VALID_M2P(pfn)) )
+    if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
         return;
 
     i1 = L1_LOGDIRTY_IDX(pfn);
@@ -331,8 +331,8 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
     if ( changed )
     {
         PAGING_DEBUG(LOGDIRTY,
-                     "marked mfn %" PRI_mfn " (pfn=%lx), dom %d\n",
-                     mfn_x(mfn), pfn, d->domain_id);
+                     "marked mfn %" PRI_mfn " (pfn %" PRI_pfn "), dom %d\n",
+                     mfn_x(mfn), pfn_x(pfn), d->domain_id);
         d->arch.paging.log_dirty.dirty_count++;
     }
 
@@ -345,23 +345,23 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
 /* Mark a page as dirty */
 void paging_mark_dirty(struct domain *d, mfn_t gmfn)
 {
-    unsigned long pfn;
+    pfn_t pfn;
 
     if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
          page_get_owner(mfn_to_page(gmfn)) != d )
         return;
 
     /* We /really/ mean PFN here, even for non-translated guests. */
-    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+    pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
 
-    paging_mark_gfn_dirty(d, pfn);
+    paging_mark_pfn_dirty(d, pfn);
 }
 
 
 /* Is this guest page dirty? */
 int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
 {
-    unsigned long pfn;
+    pfn_t pfn;
     mfn_t mfn, *l4, *l3, *l2;
     unsigned long *l1;
     int rv;
@@ -370,9 +370,9 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
     ASSERT(paging_mode_log_dirty(d));
 
     /* We /really/ mean PFN here, even for non-translated guests. */
-    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+    pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
     /* Shared pages are always read-only; invalid pages can't be dirty. */
-    if ( unlikely(SHARED_M2P(pfn) || !VALID_M2P(pfn)) )
+    if ( unlikely(SHARED_M2P(pfn_x(pfn)) || !VALID_M2P(pfn_x(pfn))) )
         return 0;
 
     mfn = d->arch.paging.log_dirty.top;
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 63e3867..cec6bfd 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -159,7 +159,7 @@ void paging_log_dirty_init(struct domain *d,
 /* mark a page as dirty */
 void paging_mark_dirty(struct domain *d, mfn_t gmfn);
 /* mark a page as dirty with taking guest pfn as parameter */
-void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn);
+void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn);
 
 /* is this guest page dirty? 
  * This is called from inside paging code, with the paging lock held. */
@@ -175,12 +175,12 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn);
  * TODO2: Abstract out the radix-tree mechanics?
  */
 #define LOGDIRTY_NODE_ENTRIES (1 << PAGETABLE_ORDER)
-#define L1_LOGDIRTY_IDX(pfn) ((pfn) & ((1 << (PAGE_SHIFT+3)) - 1))
-#define L2_LOGDIRTY_IDX(pfn) (((pfn) >> (PAGE_SHIFT+3)) & \
+#define L1_LOGDIRTY_IDX(pfn) (pfn_x(pfn) & ((1 << (PAGE_SHIFT + 3)) - 1))
+#define L2_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
-#define L3_LOGDIRTY_IDX(pfn) (((pfn) >> (PAGE_SHIFT+3+PAGETABLE_ORDER)) & \
+#define L3_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
-#define L4_LOGDIRTY_IDX(pfn) (((pfn) >> (PAGE_SHIFT+3+PAGETABLE_ORDER*2)) & \
+#define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
 
 /* VRAM dirty tracking support */
-- 
2.1.4

