
[Xen-devel] [PATCH 1/2] x86/paging: Update paging_mark_dirty() to use mfn_t



Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>

The one use of paging_mark_dirty() in common/tmem shows that TMEM currently
won't compile for ARM.

I considered introducing a common prototype in include/xen/paging.h which
could be overridden by include/asm/paging.h; this would also allow the
removal of gnttab_mark_dirty(), which seems to exist only to stub out other
common uses.

If this is considered a good idea, I'd prefer to submit it as a separate
patch rather than merge it into this one.
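
As a rough sketch only (not part of this patch; the guard macro and the
default-stub approach are illustrative assumptions, not settled design), the
common header could provide a fallback which include/asm/paging.h is free to
override:

    /* xen/include/xen/paging.h -- illustrative sketch only */
    #ifndef __XEN_PAGING_H__
    #define __XEN_PAGING_H__

    #include <xen/mm.h>
    #include <asm/paging.h>

    #ifndef paging_mark_dirty
    /* Default stub for architectures without log-dirty support. */
    #define paging_mark_dirty(d, mfn) ((void)(d), (void)(mfn))
    #endif

    #endif /* __XEN_PAGING_H__ */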
---
 xen/arch/x86/debug.c              |  2 +-
 xen/arch/x86/hvm/hvm.c            | 12 ++++++------
 xen/arch/x86/hvm/ioreq.c          |  2 +-
 xen/arch/x86/mm.c                 | 16 ++++++++--------
 xen/arch/x86/mm/guest_walk.c      |  8 ++++----
 xen/arch/x86/mm/mem_sharing.c     |  2 +-
 xen/arch/x86/mm/p2m-pod.c         |  2 +-
 xen/arch/x86/mm/paging.c          |  5 +----
 xen/arch/x86/mm/shadow/common.c   |  6 +++---
 xen/arch/x86/mm/shadow/multi.c    |  2 +-
 xen/common/tmem_xen.c             |  2 +-
 xen/include/asm-x86/grant_table.h |  2 +-
 xen/include/asm-x86/paging.h      |  2 +-
 13 files changed, 30 insertions(+), 33 deletions(-)

diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index 3030022..259b8c4 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -181,7 +181,7 @@ unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
         if ( toaddr )
         {
             copy_from_user(va, buf, pagecnt);    /* va = buf */
-            paging_mark_dirty(dp, mfn_x(mfn));
+            paging_mark_dirty(dp, mfn);
         }
         else
         {
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 61f5029..a589b17 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1923,7 +1923,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
          */
         if ( npfec.write_access )
         {
-            paging_mark_dirty(currd, mfn_x(mfn));
+            paging_mark_dirty(currd, mfn);
             /*
              * If p2m is really an altp2m, unlock here to avoid lock ordering
              * violation when the change below is propagated from host p2m.
@@ -2613,7 +2613,7 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
         if ( unlikely(p2m_is_discard_write(p2mt)) )
             *writable = 0;
         else if ( !permanent )
-            paging_mark_dirty(d, page_to_mfn(page));
+            paging_mark_dirty(d, _mfn(page_to_mfn(page)));
     }
 
     if ( !permanent )
@@ -2676,7 +2676,7 @@ void hvm_unmap_guest_frame(void *p, bool_t permanent)
         list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
             if ( track->page == page )
             {
-                paging_mark_dirty(d, mfn);
+                paging_mark_dirty(d, _mfn(mfn));
                 list_del(&track->list);
                 xfree(track);
                 break;
@@ -2693,7 +2693,7 @@ void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
 
     spin_lock(&d->arch.hvm_domain.write_map.lock);
     list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
-        paging_mark_dirty(d, page_to_mfn(track->page));
+        paging_mark_dirty(d, _mfn(page_to_mfn(track->page)));
     spin_unlock(&d->arch.hvm_domain.write_map.lock);
 }
 
@@ -3211,7 +3211,7 @@ static enum hvm_copy_result __hvm_copy(
                     memcpy(p, buf, count);
                 else
                     memset(p, 0, count);
-                paging_mark_dirty(curr->domain, page_to_mfn(page));
+                paging_mark_dirty(curr->domain, _mfn(page_to_mfn(page)));
             }
         }
         else
@@ -5799,7 +5799,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
             if ( page )
             {
-                paging_mark_dirty(d, page_to_mfn(page));
+                paging_mark_dirty(d, _mfn(page_to_mfn(page)));
                 /* These are most probably not page tables any more */
                 /* don't take a long time and don't die either */
                 sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 88071ab..e1123dc 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -282,7 +282,7 @@ static int hvm_add_ioreq_gmfn(
     rc = guest_physmap_add_page(d, _gfn(iorp->gmfn),
                                 _mfn(page_to_mfn(iorp->page)), 0);
     if ( rc == 0 )
-        paging_mark_dirty(d, page_to_mfn(iorp->page));
+        paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
 
     return rc;
 }
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c5dd6f2..24a5211 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2251,7 +2251,7 @@ static int alloc_page_type(struct page_info *page, unsigned long type,
 
     /* A page table is dirtied when its type count becomes non-zero. */
     if ( likely(owner != NULL) )
-        paging_mark_dirty(owner, page_to_mfn(page));
+        paging_mark_dirty(owner, _mfn(page_to_mfn(page)));
 
     switch ( type & PGT_type_mask )
     {
@@ -2325,7 +2325,7 @@ int free_page_type(struct page_info *page, unsigned long type,
     if ( likely(owner != NULL) && unlikely(paging_mode_enabled(owner)) )
     {
         /* A page table is dirtied when its type count becomes zero. */
-        paging_mark_dirty(owner, page_to_mfn(page));
+        paging_mark_dirty(owner, _mfn(page_to_mfn(page)));
 
         if ( shadow_mode_refcounts(owner) )
             return 0;
@@ -3247,7 +3247,7 @@ long do_mmuext_op(
                 goto pin_drop;
 
             /* A page is dirtied when its pin status is set. */
-            paging_mark_dirty(pg_owner, page_to_mfn(page));
+            paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
 
             /* We can race domain destruction (domain_relinquish_resources). */
             if ( unlikely(pg_owner != d) )
@@ -3307,7 +3307,7 @@ long do_mmuext_op(
             put_page(page);
 
             /* A page is dirtied when its pin status is cleared. */
-            paging_mark_dirty(pg_owner, page_to_mfn(page));
+            paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
 
             break;
         }
@@ -3516,7 +3516,7 @@ long do_mmuext_op(
             }
 
             /* A page is dirtied when it's being cleared. */
-            paging_mark_dirty(pg_owner, page_to_mfn(page));
+            paging_mark_dirty(pg_owner, _mfn(page_to_mfn(page)));
 
             clear_domain_page(_mfn(page_to_mfn(page)));
 
@@ -3551,7 +3551,7 @@ long do_mmuext_op(
             }
 
             /* A page is dirtied when it's being copied to. */
-            paging_mark_dirty(pg_owner, page_to_mfn(dst_page));
+            paging_mark_dirty(pg_owner, _mfn(page_to_mfn(dst_page)));
 
             copy_domain_page(_mfn(page_to_mfn(dst_page)),
                              _mfn(page_to_mfn(src_page)));
@@ -3894,7 +3894,7 @@ long do_mmu_update(
 
             set_gpfn_from_mfn(mfn, gpfn);
 
-            paging_mark_dirty(pg_owner, mfn);
+            paging_mark_dirty(pg_owner, _mfn(mfn));
 
             put_page(mfn_to_page(mfn));
             break;
@@ -4700,7 +4700,7 @@ long do_update_descriptor(u64 pa, u64 desc)
         break;
     }
 
-    paging_mark_dirty(dom, mfn);
+    paging_mark_dirty(dom, _mfn(mfn));
 
     /* All is good so make the update. */
     gdt_pent = map_domain_page(_mfn(mfn));
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 868e909..250a2b3 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -396,21 +396,21 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     {
 #if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
         if ( set_ad_bits(l4p + guest_l4_table_offset(va), &gw->l4e, 0) )
-            paging_mark_dirty(d, mfn_x(gw->l4mfn));
+            paging_mark_dirty(d, gw->l4mfn);
         if ( set_ad_bits(l3p + guest_l3_table_offset(va), &gw->l3e,
                          (pse1G && (pfec & PFEC_write_access))) )
-            paging_mark_dirty(d, mfn_x(gw->l3mfn));
+            paging_mark_dirty(d, gw->l3mfn);
 #endif
         if ( !pse1G ) 
         {
             if ( set_ad_bits(l2p + guest_l2_table_offset(va), &gw->l2e,
                              (pse2M && (pfec & PFEC_write_access))) )
-                paging_mark_dirty(d, mfn_x(gw->l2mfn));            
+                paging_mark_dirty(d, gw->l2mfn);
             if ( !pse2M ) 
             {
                 if ( set_ad_bits(l1p + guest_l1_table_offset(va), &gw->l1e, 
                                  (pfec & PFEC_write_access)) )
-                    paging_mark_dirty(d, mfn_x(gw->l1mfn));
+                    paging_mark_dirty(d, gw->l1mfn);
             }
         }
     }
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 25ff6a6..db7f389 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1210,7 +1210,7 @@ int __mem_sharing_unshare_page(struct domain *d,
 
     /* Now that the gfn<->mfn map is properly established,
      * marking dirty is feasible */
-    paging_mark_dirty(d, mfn_x(page_to_mfn(page)));
+    paging_mark_dirty(d, page_to_mfn(page));
     /* We do not need to unlock a private page */
     put_gfn(d, gfn);
     return 0;
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 149f529..367ee00 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1112,7 +1112,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
     for( i = 0; i < (1UL << order); i++ )
     {
         set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_aligned + i);
-        paging_mark_dirty(d, mfn_x(mfn) + i);
+        paging_mark_dirty(d, mfn_add(mfn, i));
     }
     
     p2m->pod.entry_count -= (1 << order);
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 4437611..3a66098 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -343,12 +343,9 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
 }
 
 /* Mark a page as dirty */
-void paging_mark_dirty(struct domain *d, unsigned long guest_mfn)
+void paging_mark_dirty(struct domain *d, mfn_t gmfn)
 {
     unsigned long pfn;
-    mfn_t gmfn;
-
-    gmfn = _mfn(guest_mfn);
 
     if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
          page_get_owner(mfn_to_page(gmfn)) != d )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 0ba4153..126dfa8 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -999,7 +999,7 @@ sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size)
     int result = 0;
     struct page_info *page = mfn_to_page(gmfn);
 
-    paging_mark_dirty(v->domain, mfn_x(gmfn));
+    paging_mark_dirty(v->domain, gmfn);
 
     // Determine which types of shadows are affected, and update each.
     //
@@ -1818,11 +1818,11 @@ void sh_emulate_unmap_dest(struct vcpu *v, void *addr, unsigned int bytes,
             sh_validate_guest_pt_write(v, sh_ctxt->mfn[1], addr + b1, b2);
     }
 
-    paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[0]));
+    paging_mark_dirty(v->domain, sh_ctxt->mfn[0]);
 
     if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )
     {
-        paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn[1]));
+        paging_mark_dirty(v->domain, sh_ctxt->mfn[1]);
         vunmap((void *)((unsigned long)addr & PAGE_MASK));
     }
     else
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 336d24f..805c056 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -675,7 +675,7 @@ _sh_propagate(struct vcpu *v,
     {
         if ( mfn_valid(target_mfn) ) {
             if ( ft & FETCH_TYPE_WRITE )
-                paging_mark_dirty(d, mfn_x(target_mfn));
+                paging_mark_dirty(d, target_mfn);
             else if ( !paging_mfn_is_dirty(d, target_mfn) )
                 sflags &= ~_PAGE_RW;
         }
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 84ae7fd..7d60b71 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -80,7 +80,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
     if ( mark_dirty )
     {
         put_page_and_type(cli_pfp);
-        paging_mark_dirty(current->domain,cli_mfn);
+        paging_mark_dirty(current->domain, _mfn(cli_mfn));
     }
     else
         put_page(cli_pfp);
diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h
index 8c9bbcf..e1b3391 100644
--- a/xen/include/asm-x86/grant_table.h
+++ b/xen/include/asm-x86/grant_table.h
@@ -46,7 +46,7 @@ int replace_grant_host_mapping(
 #define gnttab_status_gmfn(d, t, i)                     \
     (mfn_to_gmfn(d, gnttab_status_mfn(t, i)))
 
-#define gnttab_mark_dirty(d, f) paging_mark_dirty((d), (f))
+#define gnttab_mark_dirty(d, f) paging_mark_dirty((d), _mfn(f))
 
 static inline void gnttab_clear_flag(unsigned int nr, uint16_t *st)
 {
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 2243aa1..63e3867 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -157,7 +157,7 @@ void paging_log_dirty_init(struct domain *d,
                            void (*clean_dirty_bitmap)(struct domain *d));
 
 /* mark a page as dirty */
-void paging_mark_dirty(struct domain *d, unsigned long guest_mfn);
+void paging_mark_dirty(struct domain *d, mfn_t gmfn);
 /* mark a page as dirty with taking guest pfn as parameter */
 void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn);
 
-- 
2.1.4

