
[Xen-changelog] merge



# HG changeset patch
# User iap10@xxxxxxxxxxxxxxxxxxxxx
# Node ID 8864f0be80c6352c0572752643bad1caa3ee45d9
# Parent  cbde8271c23638ae1d7d6f68b4f4cc5b5c52bbda
# Parent  424166f4f3cfe5b689b71cbfc5b4ea1fbdcab2ed
merge

diff -r cbde8271c236 -r 8864f0be80c6 linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Sat Aug 13 16:01:23 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Sat Aug 13 20:49:29 2005
@@ -286,8 +286,8 @@
         pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
         pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
         mfn = pte_mfn(*pte);
-        HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
+        BUG_ON(HYPERVISOR_update_va_mapping(
+            vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
         phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
             INVALID_P2M_ENTRY;
         BUG_ON(HYPERVISOR_dom_mem_op(
@@ -300,9 +300,9 @@
 
     /* 3. Map the new extent in place of old pages. */
     for (i = 0; i < (1<<order); i++) {
-        HYPERVISOR_update_va_mapping(
+        BUG_ON(HYPERVISOR_update_va_mapping(
             vstart + (i*PAGE_SIZE),
-            __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
+            __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
         xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
         phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
     }
@@ -345,7 +345,8 @@
 #ifdef CONFIG_X86_64
         xen_l1_entry_update(pte, __pte(0));
 #else
-        HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
+        BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), 
+                                           __pte_ma(0), 0));
 #endif
         phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
             INVALID_P2M_ENTRY;
diff -r cbde8271c236 -r 8864f0be80c6 linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c   Sat Aug 13 16:01:23 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c   Sat Aug 13 20:49:29 2005
@@ -222,8 +222,8 @@
        unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
 
        if (!pte_write(*virt_to_ptep(va)))
-               HYPERVISOR_update_va_mapping(
-                       va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0);
+               BUG_ON(HYPERVISOR_update_va_mapping(
+                       va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
 
        ClearPageForeign(pte);
        set_page_count(pte, 1);
@@ -355,10 +355,10 @@
 
        if (!pte_write(*ptep)) {
                xen_pgd_unpin(__pa(pgd));
-               HYPERVISOR_update_va_mapping(
+               BUG_ON(HYPERVISOR_update_va_mapping(
                        (unsigned long)pgd,
                        pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
-                       0);
+                       0));
        }
 
        /* in the PAE case user pgd entries are overwritten before usage */
@@ -451,9 +451,9 @@
 
        if (PageHighMem(page))
                return;
-       HYPERVISOR_update_va_mapping(
+       BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__va(pfn << PAGE_SHIFT),
-               pfn_pte(pfn, flags), 0);
+               pfn_pte(pfn, flags), 0));
 }
 
 static void mm_walk(struct mm_struct *mm, pgprot_t flags)
@@ -492,10 +492,10 @@
     spin_lock(&mm->page_table_lock);
 
     mm_walk(mm, PAGE_KERNEL_RO);
-    HYPERVISOR_update_va_mapping(
+    BUG_ON(HYPERVISOR_update_va_mapping(
         (unsigned long)mm->pgd,
         pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
-        UVMF_TLB_FLUSH);
+        UVMF_TLB_FLUSH));
     xen_pgd_pin(__pa(mm->pgd));
     mm->context.pinned = 1;
     spin_lock(&mm_unpinned_lock);
@@ -510,9 +510,9 @@
     spin_lock(&mm->page_table_lock);
 
     xen_pgd_unpin(__pa(mm->pgd));
-    HYPERVISOR_update_va_mapping(
+    BUG_ON(HYPERVISOR_update_va_mapping(
         (unsigned long)mm->pgd,
-        pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0);
+        pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
     mm_walk(mm, PAGE_KERNEL);
     xen_tlb_flush();
     mm->context.pinned = 0;
diff -r cbde8271c236 -r 8864f0be80c6 linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Sat Aug 13 16:01:23 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Sat Aug 13 20:49:29 2005
@@ -211,11 +211,11 @@
             /* Link back into the page tables if it's not a highmem page. */
             if ( pfn < max_low_pfn )
             {
-                HYPERVISOR_update_va_mapping(
+                BUG_ON(HYPERVISOR_update_va_mapping(
                     (unsigned long)__va(pfn << PAGE_SHIFT),
                     __pte_ma((mfn_list[i] << PAGE_SHIFT) |
                              pgprot_val(PAGE_KERNEL)),
-                    0);
+                    0));
             }
 
             /* Finally, relinquish the memory back to the system allocator. */
@@ -249,8 +249,8 @@
             {
                 v = phys_to_virt(pfn << PAGE_SHIFT);
                 scrub_pages(v, 1);
-                HYPERVISOR_update_va_mapping(
-                    (unsigned long)v, __pte_ma(0), 0);
+                BUG_ON(HYPERVISOR_update_va_mapping(
+                    (unsigned long)v, __pte_ma(0), 0));
             }
 #ifdef CONFIG_XEN_SCRUB_PAGES
             else
diff -r cbde8271c236 -r 8864f0be80c6 linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h Sat Aug 13 16:01:23 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h Sat Aug 13 20:49:29 2005
@@ -386,13 +386,6 @@
          "4" (flags)
        : "memory" );
 
-    if ( unlikely(ret < 0) )
-    {
-        printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
-               va, (new_val).pte_low, flags);
-        BUG();
-    }
-
     return ret;
 }
 
diff -r cbde8271c236 -r 8864f0be80c6 linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Sat Aug 13 16:01:23 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Sat Aug 13 20:49:29 2005
@@ -497,7 +497,7 @@
        do {                                                              \
                if (__dirty) {                                            \
                        if ( likely((__vma)->vm_mm == current->mm) ) {    \
-                           HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits)); \
+                           BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
                        } else {                                          \
                             xen_l1_entry_update((__ptep), (__entry)); \
                            flush_tlb_page((__vma), (__address));         \
diff -r cbde8271c236 -r 8864f0be80c6 xen/Rules.mk
--- a/xen/Rules.mk      Sat Aug 13 16:01:23 2005
+++ b/xen/Rules.mk      Sat Aug 13 20:49:29 2005
@@ -2,7 +2,7 @@
 # If you change any of these configuration options then you must
 # 'make clean' before rebuilding.
 #
-verbose     ?= n
+verbose     ?= y
 debug       ?= n
 perfc       ?= n
 perfc_arrays?= n
diff -r cbde8271c236 -r 8864f0be80c6 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Sat Aug 13 16:01:23 2005
+++ b/xen/arch/x86/mm.c Sat Aug 13 20:49:29 2005
@@ -2850,6 +2850,129 @@
 #define PTWR_PRINTK(_f, _a...) ((void)0)
 #endif
 
+
+#ifdef PERF_ARRAYS
+
+/**************** writeable pagetables profiling functions *****************/
+
+#define ptwr_eip_buckets        256
+
+int ptwr_eip_stat_threshold[] = {1, 10, 50, 100, L1_PAGETABLE_ENTRIES};
+
+#define ptwr_eip_stat_thresholdN (sizeof(ptwr_eip_stat_threshold)/sizeof(int))
+
+struct {
+    unsigned long eip;
+    domid_t       id;
+    u32           val[ptwr_eip_stat_thresholdN];
+} typedef ptwr_eip_stat_t;
+
+ptwr_eip_stat_t ptwr_eip_stats[ptwr_eip_buckets];
+
+static inline unsigned int ptwr_eip_stat_hash( unsigned long eip, domid_t id )
+{
+    return (((unsigned long) id) ^ eip ^ (eip>>8) ^ (eip>>16) ^ (eip>24)) % 
+       ptwr_eip_buckets;
+}
+
+static void ptwr_eip_stat_inc(u32 *n)
+{
+    (*n)++;
+    if(*n == 0)
+    {
+       (*n)=~0;
+       /* rescale all buckets */
+       int i;
+       for(i=0;i<ptwr_eip_buckets;i++)
+       {
+           int j;
+           for(j=0;j<ptwr_eip_stat_thresholdN;j++)
+               ptwr_eip_stats[i].val[j] = 
+                   (((u64)ptwr_eip_stats[i].val[j])+1)>>1;
+       }
+    }
+}
+
+static void ptwr_eip_stat_update( unsigned long eip, domid_t id, int modified )
+{
+    int i, b;
+
+    i = b = ptwr_eip_stat_hash( eip, id );
+
+    do
+    {
+       if (!ptwr_eip_stats[i].eip)
+       { /* doesn't exist */
+           ptwr_eip_stats[i].eip = eip;
+           ptwr_eip_stats[i].id = id;
+           memset(ptwr_eip_stats[i].val,0, sizeof(ptwr_eip_stats[i].val));
+       }
+
+       if (ptwr_eip_stats[i].eip == eip)
+       {
+           int j;
+           for(j=0;j<ptwr_eip_stat_thresholdN;j++)
+           {
+               if(modified <= ptwr_eip_stat_threshold[j])
+               {
+                   break;
+               }
+           }
+           BUG_ON(j>=ptwr_eip_stat_thresholdN);
+           ptwr_eip_stat_inc(&(ptwr_eip_stats[i].val[j]));    
+           return;
+       }
+       i = (i+1) % ptwr_eip_buckets;
+    }
+    while(i!=b);
+   
+    printk("ptwr_eip_stat: too many EIPs in use!\n");
+    
+    ptwr_eip_stat_print();
+    ptwr_eip_stat_reset();
+}
+
+void ptwr_eip_stat_reset()
+{
+    memset( ptwr_eip_stats, 0, sizeof(ptwr_eip_stats));
+}
+
+void ptwr_eip_stat_print()
+{
+    struct domain *e;
+    domid_t d;
+
+    for_each_domain(e)
+    {
+       int i;
+       d = e->domain_id;
+
+       for(i=0;i<ptwr_eip_buckets;i++)
+       {
+           if ( ptwr_eip_stats[i].eip && ptwr_eip_stats[i].id == d )
+           {
+               int j;
+               printk("D %d  eip %08lx ",
+                      ptwr_eip_stats[i].id, ptwr_eip_stats[i].eip );
+
+               for(j=0;j<ptwr_eip_stat_thresholdN;j++)
+                   printk("<=%u %4u \t",
+                          ptwr_eip_stat_threshold[j],
+                          ptwr_eip_stats[i].val[j] );
+               printk("\n");
+           }   
+       }
+    }
+}
+
+#else /* PERF_ARRAYS */
+
+#define ptwr_eip_stat_update( eip, id, modified ) ((void)0)
+
+#endif
+
+/*******************************************************************/
+
 /* Re-validate a given p.t. page, given its prior snapshot */
 int revalidate_l1(
     struct domain *d, l1_pgentry_t *l1page, l1_pgentry_t *snapshot)
@@ -2967,6 +3090,7 @@
     modified = revalidate_l1(d, pl1e, d->arch.ptwr[which].page);
     unmap_domain_page(pl1e);
     perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
+    ptwr_eip_stat_update(  d->arch.ptwr[which].eip, d->domain_id, modified);
     d->arch.ptwr[which].prev_nr_updates  = modified;
 
     /*
@@ -3122,7 +3246,8 @@
 };
 
 /* Write page fault handler: check if guest is trying to modify a PTE. */
-int ptwr_do_page_fault(struct domain *d, unsigned long addr)
+int ptwr_do_page_fault(struct domain *d, unsigned long addr, 
+                      struct cpu_user_regs *regs)
 {
     unsigned long    pfn;
     struct pfn_info *page;
@@ -3157,6 +3282,10 @@
     {
         return 0;
     }
+
+#if 0 /* Leave this in as useful for debugging */ 
+    goto emulate; 
+#endif
 
     /* Get the L2 index at which this L1 p.t. is always mapped. */
     l2_idx = page->u.inuse.type_info & PGT_va_mask;
@@ -3227,7 +3356,11 @@
     d->arch.ptwr[which].l1va   = addr | 1;
     d->arch.ptwr[which].l2_idx = l2_idx;
     d->arch.ptwr[which].vcpu   = current;
-    
+
+#ifdef PERF_ARRAYS
+    d->arch.ptwr[which].eip    = regs->eip;
+#endif
+
     /* For safety, disconnect the L1 p.t. page from current space. */
     if ( which == PTWR_PT_ACTIVE )
     {
diff -r cbde8271c236 -r 8864f0be80c6 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Sat Aug 13 16:01:23 2005
+++ b/xen/arch/x86/traps.c      Sat Aug 13 20:49:29 2005
@@ -438,7 +438,7 @@
              &&
              KERNEL_MODE(v, regs) &&
              ((regs->error_code & 3) == 3) && /* write-protection fault */
-             ptwr_do_page_fault(d, addr) )
+             ptwr_do_page_fault(d, addr, regs) )
         {
             UNLOCK_BIGLOCK(d);
             return EXCRET_fault_fixed;
diff -r cbde8271c236 -r 8864f0be80c6 xen/common/perfc.c
--- a/xen/common/perfc.c        Sat Aug 13 16:01:23 2005
+++ b/xen/common/perfc.c        Sat Aug 13 20:49:29 2005
@@ -7,6 +7,7 @@
 #include <xen/spinlock.h>
 #include <public/dom0_ops.h>
 #include <asm/uaccess.h>
+#include <xen/mm.h>
 
 #undef  PERFCOUNTER
 #undef  PERFCOUNTER_CPU
@@ -81,6 +82,10 @@
         }
         printk("\n");
     }
+
+#ifdef PERF_ARRAYS
+    ptwr_eip_stat_print();
+#endif
 }
 
 void perfc_reset(unsigned char key)
@@ -118,6 +123,10 @@
             break;
         }
     }
+
+#ifdef PERF_ARRAYS
+    ptwr_eip_stat_reset();
+#endif
 }
 
 static dom0_perfc_desc_t perfc_d[NR_PERFCTRS];
diff -r cbde8271c236 -r 8864f0be80c6 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Sat Aug 13 16:01:23 2005
+++ b/xen/include/asm-x86/mm.h  Sat Aug 13 20:49:29 2005
@@ -316,6 +316,9 @@
     unsigned int prev_nr_updates;
     /* Exec domain which created writable mapping. */
     struct vcpu *vcpu;
+    /* EIP of the address which took the original write fault
+       used for stats collection only */
+    unsigned long eip;
 };
 
 #define PTWR_PT_ACTIVE 0
@@ -327,7 +330,8 @@
 int  ptwr_init(struct domain *);
 void ptwr_destroy(struct domain *);
 void ptwr_flush(struct domain *, const int);
-int  ptwr_do_page_fault(struct domain *, unsigned long);
+int  ptwr_do_page_fault(struct domain *, unsigned long, 
+                       struct cpu_user_regs *);
 int  revalidate_l1(struct domain *, l1_pgentry_t *, l1_pgentry_t *);
 
 void cleanup_writable_pagetable(struct domain *d);
@@ -350,6 +354,18 @@
 #define _audit_domain(_d, _f) ((void)0)
 #define audit_domain(_d)      ((void)0)
 #define audit_domains()       ((void)0)
+
+#endif
+
+#ifdef PERF_ARRAYS
+
+void ptwr_eip_stat_reset();
+void ptwr_eip_stat_print();
+
+#else
+
+#define ptwr_eip_stat_reset() ((void)0)
+#define ptwr_eip_stat_print() ((void)0)
 
 #endif
 

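The common thread in the Linux-side hunks above is that the error handling formerly built into HYPERVISOR_update_va_mapping() (the printk()/BUG() block removed from hypercall.h) now lives at each call site. A minimal caller-side sketch of that pattern, assuming the usual Xen sparse-tree headers; the helper name below is hypothetical and not part of the changeset:

/*
 * Sketch only: the hypercall wrapper now just returns the status word,
 * so callers assert success themselves, exactly as in the hunks above.
 */
static void zap_one_page(unsigned long vstart, int i)
{
    /* Clear the PTE for page i of the region; any failure is fatal here. */
    BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i * PAGE_SIZE),
                                        __pte_ma(0), 0));
}
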
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog