[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] X86: offline/broken page handler for pod cache



X86: offline/broken page handler for pod cache

When offlining a page, or when a broken page occurs, the page may be populated,
or it may be in the PoD cache.
This patch handles offline/broken pages that are in the PoD cache.
It scans the PoD cache; on a hit, it removes the page and replaces it,
and then puts the offline/broken page on page_offlined_list/page_broken_list.

Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>

diff -r 5a8eb5e3d8bc xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Wed Mar 30 14:44:55 2011 +0800
+++ b/xen/arch/x86/mm/p2m.c     Wed Apr 06 15:45:46 2011 +0800
@@ -679,6 +679,78 @@ p2m_pod_empty_cache(struct domain *d)
     BUG_ON(p2m->pod.count != 0);
 
     spin_unlock(&d->page_alloc_lock);
+}
+
+int
+p2m_pod_offline_or_broken_hit(struct page_info *p)
+{
+    struct domain *d;
+    struct p2m_domain *p2m;
+    struct page_info *q, *tmp;
+    unsigned long mfn, bmfn;
+
+    if ( !(d = page_get_owner(p)) || !(p2m = p2m_get_hostp2m(d)) )
+        return 0;
+
+    spin_lock(&d->page_alloc_lock);
+    bmfn = mfn_x(page_to_mfn(p));
+    page_list_for_each_safe(q, tmp, &p2m->pod.super)
+    {
+        mfn = mfn_x(page_to_mfn(q));
+        if ( (bmfn >= mfn) && ((bmfn - mfn) < SUPERPAGE_PAGES) )
+        {
+            unsigned long i;
+            page_list_del(q, &p2m->pod.super);
+            for ( i = 0; i < SUPERPAGE_PAGES; i++)
+            {
+                q = mfn_to_page(_mfn(mfn + i));
+                page_list_add_tail(q, &p2m->pod.single);
+            }
+            page_list_del(p, &p2m->pod.single);
+            p2m->pod.count--;
+            goto pod_hit;
+        }
+    }
+
+    page_list_for_each_safe(q, tmp, &p2m->pod.single)
+    {
+        mfn = mfn_x(page_to_mfn(q));
+        if ( mfn == bmfn )
+        {
+            page_list_del(p, &p2m->pod.single);
+            p2m->pod.count--;
+            goto pod_hit;
+        }
+    }
+
+    spin_unlock(&d->page_alloc_lock);
+    return 0;
+
+pod_hit:
+    page_list_add_tail(p, &d->arch.relmem_list);
+    spin_unlock(&d->page_alloc_lock);
+    return 1;
+}
+
+void
+p2m_pod_offline_or_broken_replace(struct page_info *p)
+{
+    struct domain *d;
+    struct p2m_domain *p2m;
+
+    if ( !(d = page_get_owner(p)) || !(p2m = p2m_get_hostp2m(d)) )
+        return;
+
+    free_domheap_page(p);
+
+    p = alloc_domheap_page(d, 0);
+    if ( unlikely(!p) )
+        return;
+
+    p2m_lock(p2m);
+    p2m_pod_cache_add(p2m, p, 0);
+    p2m_unlock(p2m);
+    return;
 }
 
 /* This function is needed for two reasons:
diff -r 5a8eb5e3d8bc xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Wed Mar 30 14:44:55 2011 +0800
+++ b/xen/common/page_alloc.c   Wed Apr 06 15:45:46 2011 +0800
@@ -41,6 +41,7 @@
 #include <asm/page.h>
 #include <asm/numa.h>
 #include <asm/flushtlb.h>
+#include <asm/p2m.h>
 
 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
@@ -714,10 +715,15 @@ int offline_page(unsigned long mfn, int 
     }
     else if ( (owner = page_get_owner_and_reference(pg)) )
     {
+        if ( p2m_pod_offline_or_broken_hit(pg) )
+            goto pod_replace;
+        else
+        {
             *status = PG_OFFLINE_OWNED | PG_OFFLINE_PENDING |
               (owner->domain_id << PG_OFFLINE_OWNER_SHIFT);
             /* Release the reference since it will not be allocated anymore */
             put_page(pg);
+        }
     }
     else if ( old_info & PGC_xen_heap )
     {
@@ -742,6 +748,18 @@ int offline_page(unsigned long mfn, int 
         *status |= PG_OFFLINE_BROKEN;
 
     spin_unlock(&heap_lock);
+
+    return ret;
+
+pod_replace:
+    put_page(pg);
+    spin_unlock(&heap_lock);
+
+    p2m_pod_offline_or_broken_replace(pg);
+    *status = PG_OFFLINE_OFFLINED;
+
+    if ( broken )
+        *status |= PG_OFFLINE_BROKEN;
 
     return ret;
 }
diff -r 5a8eb5e3d8bc xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Wed Mar 30 14:44:55 2011 +0800
+++ b/xen/include/asm-x86/p2m.h Wed Apr 06 15:45:46 2011 +0800
@@ -457,6 +457,14 @@ p2m_pod_demand_populate(struct p2m_domai
                         unsigned int order,
                         p2m_query_t q);
 
+/* Scan pod cache when offline/broken page triggered */
+int
+p2m_pod_offline_or_broken_hit(struct page_info *p);
+
+/* Replace pod cache when offline/broken page triggered */
+void
+p2m_pod_offline_or_broken_replace(struct page_info *p);
+
 /* Add a page to a domain's p2m table */
 int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
                             unsigned long mfn, unsigned int page_order, 

Attachment: pod_broken_page.patch
Description: pod_broken_page.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.