
[Xen-changelog] [xen-unstable] [IA64] preliminary clean up ia64 mm.c for blktap dom0 mount support.



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 371d2837a1feb3985e0899676d50c5a124555489
# Parent  4816a891b3d69491c0f2e063dc59d8118d40b8e9
[IA64] preliminary clean up ia64 mm.c for blktap dom0 mount support.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/mm.c            |  134 ++++++++++++++++++++++++--------------
 xen/include/asm-ia64/perfc_defn.h |    2 +-
 2 files changed, 86 insertions(+), 50 deletions(-)
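
For readers following the change: the core of this cleanup is that
domain_page_flush() gains a struct page_info* argument and becomes
domain_page_flush_and_put(), so the put_page() that every call site used
to issue after the flush is now performed inside the flush routine itself.
A minimal sketch of the calling pattern before and after (simplified;
teardown_before/teardown_after are illustrative names, not functions in
this patch):

    /* Before: each call site flushed the vTLB, then dropped the
     * page reference itself. */
    static void teardown_before(struct domain *d, unsigned long mpaddr,
                                volatile pte_t *ptep, pte_t old_pte)
    {
        struct page_info *page = mfn_to_page(pte_pfn(old_pte));

        domain_page_flush(d, mpaddr, ptep, old_pte);  /* invalidate vTLB */
        put_page(page);                               /* then drop the ref */
    }

    /* After: the flush routine takes the page and calls put_page()
     * itself, so the flush-before-put ordering is enforced in
     * exactly one place. */
    static void teardown_after(struct domain *d, unsigned long mpaddr,
                               volatile pte_t *ptep, pte_t old_pte)
    {
        struct page_info *page = mfn_to_page(pte_pfn(old_pte));

        domain_page_flush_and_put(d, mpaddr, ptep, old_pte, page);
    }

The ordering presumably matters because put_page() may free the page:
flushing stale vTLB entries first ensures no translation can still reach
the page once it is recycled.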

diff -r 4816a891b3d6 -r 371d2837a1fe xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Fri Nov 10 11:19:57 2006 -0700
+++ b/xen/arch/ia64/xen/mm.c    Fri Nov 10 11:34:39 2006 -0700
@@ -36,7 +36,7 @@
  * 
  *   operations on this structure:
  *   - global tlb purge
- *     vcpu_ptc_g(), vcpu_ptc_ga() and domain_page_flush()
+ *     vcpu_ptc_g(), vcpu_ptc_ga() and domain_page_flush_and_put()
  *     I.e. callers of domain_flush_vtlb_range() and domain_flush_vtlb_all()
  *     These functions invalidate VHPT entry and vcpu->arch.{i, d}tlb
  * 
@@ -179,8 +179,9 @@
 #include <asm/page.h>
 #include <public/memory.h>
 
-static void domain_page_flush(struct domain* d, unsigned long mpaddr,
-                              volatile pte_t* ptep, pte_t old_pte);
+static void domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
+                                      volatile pte_t* ptep, pte_t old_pte, 
+                                      struct page_info* page);
 
 extern unsigned long ia64_iobase;
 
@@ -1038,6 +1039,25 @@ assign_domain_mach_page(struct domain *d
     return mpaddr;
 }
 
+static void
+domain_put_page(struct domain* d, unsigned long mpaddr,
+                volatile pte_t* ptep, pte_t old_pte, int clear_PGC_allocate)
+{
+    unsigned long mfn = pte_pfn(old_pte);
+    struct page_info* page = mfn_to_page(mfn);
+
+    if (page_get_owner(page) == d ||
+        page_get_owner(page) == NULL) {
+        BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
+        set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+    }
+
+    if (clear_PGC_allocate)
+        try_to_clear_PGC_allocate(d, page);
+
+    domain_page_flush_and_put(d, mpaddr, ptep, old_pte, page);
+}
+
 // caller must get_page(mfn_to_page(mfn)) before call.
 // caller must call set_gpfn_from_mfn() before call if necessary.
 // because set_gpfn_from_mfn() result must be visible before pte xchg
@@ -1068,18 +1088,7 @@ assign_domain_page_replace(struct domain
         //   => create_host_mapping()
         //      => assign_domain_page_replace()
         if (mfn != old_mfn) {
-            struct page_info* old_page = mfn_to_page(old_mfn);
-
-            if (page_get_owner(old_page) == d ||
-                page_get_owner(old_page) == NULL) {
-                BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
-                set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
-            }
-
-            domain_page_flush(d, mpaddr, pte, old_pte);
-
-            try_to_clear_PGC_allocate(d, old_page);
-            put_page(old_page);
+            domain_put_page(d, mpaddr, pte, old_pte, 1);
         }
     }
     perfc_incrc(assign_domain_page_replace);
@@ -1143,8 +1152,7 @@ assign_domain_page_cmpxchg_rel(struct do
 
     set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
 
-    domain_page_flush(d, mpaddr, pte, old_pte);
-    put_page(old_page);
+    domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page);
     perfc_incrc(assign_domain_pge_cmpxchg_rel);
     return 0;
 }
@@ -1201,23 +1209,12 @@ zap_domain_page_one(struct domain *d, un
     page = mfn_to_page(mfn);
     BUG_ON((page->count_info & PGC_count_mask) == 0);
 
-    if (page_get_owner(page) == d ||
-        page_get_owner(page) == NULL) {
-        // exchange_memory() calls
-        //   steal_page()
-        //     page owner is set to NULL
-        //   guest_physmap_remove_page()
-        //     zap_domain_page_one()
-        BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
-        set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
-    }
-
-    domain_page_flush(d, mpaddr, pte, old_pte);
-
-    if (page_get_owner(page) != NULL) {
-        try_to_clear_PGC_allocate(d, page);
-    }
-    put_page(page);
+    // exchange_memory() calls
+    //   steal_page()
+    //     page owner is set to NULL
+    //   guest_physmap_remove_page()
+    //     zap_domain_page_one()
+    domain_put_page(d, mpaddr, pte, old_pte, (page_get_owner(page) != NULL));
     perfc_incrc(zap_dcomain_page_one);
 }
 
@@ -1445,12 +1442,13 @@ destroy_grant_host_mapping(unsigned long
                unsigned long mfn, unsigned int flags)
 {
     struct domain* d = current->domain;
+    unsigned long gpfn = gpaddr >> PAGE_SHIFT;
     volatile pte_t* pte;
     unsigned long cur_arflags;
     pte_t cur_pte;
     pte_t new_pte;
     pte_t old_pte;
-    struct page_info* page;
+    struct page_info* page = mfn_to_page(mfn);
 
     if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
         gdprintk(XENLOG_INFO, "%s: flags 0x%x\n", __func__, flags);
@@ -1467,7 +1465,8 @@ destroy_grant_host_mapping(unsigned long
  again:
     cur_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
     cur_pte = pfn_pte(mfn, __pgprot(cur_arflags));
-    if (!pte_present(cur_pte)) {
+    if (!pte_present(cur_pte) ||
+        (page_get_owner(page) == d && get_gpfn_from_mfn(mfn) == gpfn)) {
         gdprintk(XENLOG_INFO, "%s: gpaddr 0x%lx mfn 0x%lx cur_pte 0x%lx\n",
                 __func__, gpaddr, mfn, pte_val(cur_pte));
         return GNTST_general_error;
@@ -1492,11 +1491,10 @@ destroy_grant_host_mapping(unsigned long
     }
     BUG_ON(pte_pfn(old_pte) != mfn);
 
-    domain_page_flush(d, gpaddr, pte, old_pte);
-
-    page = mfn_to_page(mfn);
-    BUG_ON(page_get_owner(page) == d);//try_to_clear_PGC_allocate(d, page) is not needed.
-    put_page(page);
+    /* try_to_clear_PGC_allocate(d, page) is not needed. */
+    BUG_ON(page_get_owner(page) == d &&
+           get_gpfn_from_mfn(mfn) == gpfn);
+    domain_page_flush_and_put(d, gpaddr, pte, old_pte, page);
 
     perfc_incrc(destroy_grant_host_mapping);
     return GNTST_okay;
@@ -1580,10 +1578,12 @@ steal_page(struct domain *d, struct page
         // page->u.inused._domain = 0;
         _nd = x >> 32;
 
-        if (unlikely(!(memflags & MEMF_no_refcount) &&
+        if (
+            // when !MEMF_no_refcount, the page might be put_page()'d, or
+            // it will be put_page()'d later, depending on whether it was queued.
+            unlikely(!(memflags & MEMF_no_refcount) &&
                      ((x & (PGC_count_mask | PGC_allocated)) !=
                       (1 | PGC_allocated))) ||
-
             // when MEMF_no_refcount, page isn't de-assigned from
             // this domain yet. So count_info = 2
             unlikely((memflags & MEMF_no_refcount) &&
@@ -1664,11 +1664,10 @@ guest_physmap_remove_page(struct domain 
     perfc_incrc(guest_physmap_remove_page);
 }
 
-//XXX sledgehammer.
-//    flush finer range.
 static void
-domain_page_flush(struct domain* d, unsigned long mpaddr,
-                  volatile pte_t* ptep, pte_t old_pte)
+domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
+                          volatile pte_t* ptep, pte_t old_pte,
+                          struct page_info* page)
 {
 #ifdef CONFIG_XEN_IA64_TLB_TRACK
     struct tlb_track_entry* entry;
@@ -1678,26 +1677,63 @@ domain_page_flush(struct domain* d, unsi
         shadow_mark_page_dirty(d, mpaddr >> PAGE_SHIFT);
 
 #ifndef CONFIG_XEN_IA64_TLB_TRACK
+    //XXX sledgehammer.
+    //    flush finer range.
     domain_flush_vtlb_all();
+    put_page(page);
 #else
     switch (tlb_track_search_and_remove(d->arch.tlb_track,
                                         ptep, old_pte, &entry)) {
     case TLB_TRACK_NOT_TRACKED:
         // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_TRACKED\n", __func__);
+        /* This page is zapped from this domain
+         * by memory decrease, exchange, or dom0vp_zap_physmap.
+         * I.e. the page is zapped either to return it to xen
+         * (balloon driver or DMA page allocation), or because a
+         * page mapped from a foreign domain is unmapped from this domain.
+         * In the former case the page is to be freed, so that
+         * freeing can be deferred and batched.
+         * In the latter case the page is unmapped, so that
+         * it must be flushed. To optimize this, we
+         * queue the page and flush the vTLB only once.
+         * I.e. the caller must call dfree_flush() explicitly.
+         */
         domain_flush_vtlb_all();
+        put_page(page);
         break;
     case TLB_TRACK_NOT_FOUND:
+        // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_FOUND\n", __func__);
+        /* This page is zapped from this domain
+         * by grant table page unmap.
+         * Luckily the domain that mapped this page never
+         * accessed it, so we don't have to flush the vTLB.
+         * Probably the domain only did DMA.
+         */
         /* do nothing */
-        // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_FOUND\n", __func__);
+        put_page(page);
         break;
     case TLB_TRACK_FOUND:
         // dprintk(XENLOG_WARNING, "%s TLB_TRACK_FOUND\n", __func__);
+        /* This page is zapped from this domain
+         * by grant table page unmap.
+         * Fortunately this page is accessed via only one virtual
+         * memory address, so it is easy to flush.
+         */
         domain_flush_vtlb_track_entry(d, entry);
         tlb_track_free_entry(d->arch.tlb_track, entry);
+        put_page(page);
         break;
     case TLB_TRACK_MANY:
         gdprintk(XENLOG_INFO, "%s TLB_TRACK_MANY\n", __func__);
+        /* This page is zapped from this domain
+         * by grant table page unmap.
+         * Unfortunately this page is accessed via many virtual
+         * memory addresses (or too many times via a single virtual
+         * address), so we have given up tracking virtual addresses;
+         * a full vTLB flush is necessary.
+         */
         domain_flush_vtlb_all();
+        put_page(page);
         break;
     case TLB_TRACK_AGAIN:
         gdprintk(XENLOG_ERR, "%s TLB_TRACK_AGAIN\n", __func__);
@@ -1705,7 +1741,7 @@ domain_page_flush(struct domain* d, unsi
         break;
     }
 #endif
-    perfc_incrc(domain_page_flush);
+    perfc_incrc(domain_page_flush_and_put);
 }
 
 int
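
As a reading aid (not part of the patch), the flush policy that
domain_page_flush_and_put() encodes when CONFIG_XEN_IA64_TLB_TRACK is
enabled can be condensed to the following, using the identifiers from
the hunk above:

    switch (tlb_track_search_and_remove(d->arch.tlb_track,
                                        ptep, old_pte, &entry)) {
    case TLB_TRACK_NOT_TRACKED:   /* zapped by balloon/exchange/zap_physmap */
        domain_flush_vtlb_all();                 /* full flush */
        put_page(page);
        break;
    case TLB_TRACK_NOT_FOUND:     /* mapping never entered any vTLB */
        /* no flush needed */
        put_page(page);
        break;
    case TLB_TRACK_FOUND:         /* mapped via exactly one virtual address */
        domain_flush_vtlb_track_entry(d, entry); /* targeted flush */
        tlb_track_free_entry(d->arch.tlb_track, entry);
        put_page(page);
        break;
    case TLB_TRACK_MANY:          /* too many virtual addresses to track */
        domain_flush_vtlb_all();                 /* full flush */
        put_page(page);
        break;
    case TLB_TRACK_AGAIN:         /* error case, logged by the patch */
        /* note: the only case that does not put_page() */
        break;
    }

Without CONFIG_XEN_IA64_TLB_TRACK the routine always does a full
domain_flush_vtlb_all() followed by put_page(page).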
diff -r 4816a891b3d6 -r 371d2837a1fe xen/include/asm-ia64/perfc_defn.h
--- a/xen/include/asm-ia64/perfc_defn.h Fri Nov 10 11:19:57 2006 -0700
+++ b/xen/include/asm-ia64/perfc_defn.h Fri Nov 10 11:34:39 2006 -0700
@@ -134,7 +134,7 @@ PERFCOUNTER_CPU(steal_page,             
 PERFCOUNTER_CPU(steal_page,                     "steal_page")
 PERFCOUNTER_CPU(guest_physmap_add_page,         "guest_physmap_add_page")
 PERFCOUNTER_CPU(guest_physmap_remove_page,      "guest_physmap_remove_page")
-PERFCOUNTER_CPU(domain_page_flush,              "domain_page_flush")
+PERFCOUNTER_CPU(domain_page_flush_and_put,      "domain_page_flush_and_put")
 
 // dom0vp
 PERFCOUNTER_CPU(dom0vp_phystomach,              "dom0vp_phystomach")

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog