
[Xen-changelog] [xen-unstable] xen: Clean up some paging files: no tab and trailing spaces.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1186474049 -3600
# Node ID 7953164cebb6dfbbee08d06c91f424b63d87ed71
# Parent  ff2dae3ebb1de85229ef3067a0257316bcc78aab
xen: Clean up some paging files: no tab and trailing spaces.
Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
---
 xen/arch/x86/mm/hap/guest_walk.c |    2 
 xen/arch/x86/mm/hap/hap.c        |   56 ++++++------
 xen/arch/x86/mm/p2m.c            |  180 +++++++++++++++++++--------------------
 xen/arch/x86/mm/paging.c         |   86 +++++++++---------
 xen/include/asm-x86/domain.h     |   17 +--
 xen/include/asm-x86/hap.h        |    2 
 6 files changed, 172 insertions(+), 171 deletions(-)

diff -r ff2dae3ebb1d -r 7953164cebb6 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c  Tue Aug 07 09:06:38 2007 +0100
+++ b/xen/arch/x86/mm/hap/guest_walk.c  Tue Aug 07 09:07:29 2007 +0100
@@ -84,7 +84,7 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
         mfn = get_mfn_from_gpfn(gpfn);
         if ( mfn == INVALID_MFN )
         {
-            HAP_PRINTK("bad pfn=0x%lx from gva=0x%lx at lev%d\n", gpfn, gva, 
+            HAP_PRINTK("bad pfn=0x%lx from gva=0x%lx at lev%d\n", gpfn, gva,
                        lev);
             success = 0;
             break;
diff -r ff2dae3ebb1d -r 7953164cebb6 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Tue Aug 07 09:06:38 2007 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Tue Aug 07 09:07:29 2007 +0100
@@ -73,7 +73,7 @@ int hap_disable_log_dirty(struct domain 
     hap_unlock(d);
 
     /* set l1e entries of P2M table with normal mode */
-    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);    
+    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);
     return 0;
 }
 
@@ -111,7 +111,7 @@ static struct page_info *hap_alloc(struc
 
 static void hap_free(struct domain *d, mfn_t mfn)
 {
-    struct page_info *pg = mfn_to_page(mfn); 
+    struct page_info *pg = mfn_to_page(mfn);
 
     ASSERT(hap_locked_by_me(d));
 
@@ -128,7 +128,7 @@ static struct page_info *hap_alloc_p2m_p
 
 #if CONFIG_PAGING_LEVELS == 3
     /* Under PAE mode, top-level P2M table should be allocated below 4GB space
-     * because the size of h_cr3 is only 32-bit. We use alloc_domheap_pages to 
+     * because the size of h_cr3 is only 32-bit. We use alloc_domheap_pages to
      * force this requirement, and exchange the guaranteed 32-bit-clean
      * page for the one we just hap_alloc()ed. */
     if ( d->arch.paging.hap.p2m_pages == 0
@@ -166,9 +166,9 @@ void hap_free_p2m_page(struct domain *d,
         HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
                   pg->count_info, pg->u.inuse.type_info);
     pg->count_info = 0;
-    /* Free should not decrement domain's total allocation, since 
+    /* Free should not decrement domain's total allocation, since
      * these pages were allocated without an owner. */
-    page_set_owner(pg, NULL); 
+    page_set_owner(pg, NULL);
     free_domheap_page(pg);
     d->arch.paging.hap.p2m_pages--;
     ASSERT(d->arch.paging.hap.p2m_pages >= 0);
@@ -221,7 +221,7 @@ hap_set_allocation(struct domain *d, uns
             pg->count_info = 0;
             free_domheap_page(pg);
         }
-        
+
         /* Check to see if we need to yield and try again */
         if ( preempted && hypercall_preempt_check() )
         {
@@ -275,7 +275,7 @@ static void hap_install_xen_entries_in_l
 
     l2e = hap_map_domain_page(l2hmfn);
     ASSERT(l2e != NULL);
-    
+
     /* Copy the common Xen mappings from the idle domain */
     memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
            &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
@@ -318,7 +318,7 @@ static void hap_install_xen_entries_in_l
 
     l2e = hap_map_domain_page(l2mfn);
     ASSERT(l2e != NULL);
-    
+
     /* Copy the common Xen mappings from the idle domain */
     memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT],
            &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
@@ -362,7 +362,7 @@ static mfn_t hap_make_monitor_table(stru
     }
 #elif CONFIG_PAGING_LEVELS == 3
     {
-        mfn_t m3mfn, m2mfn; 
+        mfn_t m3mfn, m2mfn;
         l3_pgentry_t *l3e;
         l2_pgentry_t *l2e;
         int i;
@@ -384,8 +384,8 @@ static mfn_t hap_make_monitor_table(stru
         l2e = hap_map_domain_page(m2mfn);
         for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
             l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
-                (l3e_get_flags(l3e[i]) & _PAGE_PRESENT) 
-                ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR) 
+                (l3e_get_flags(l3e[i]) & _PAGE_PRESENT)
+                ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR)
                 : l2e_empty();
         hap_unmap_domain_page(l2e);
         hap_unmap_domain_page(l3e);
@@ -536,7 +536,7 @@ void hap_teardown(struct domain *d)
                       d->arch.paging.hap.p2m_pages);
         ASSERT(d->arch.paging.hap.total_pages == 0);
     }
-    
+
     d->arch.paging.mode &= ~PG_log_dirty;
 
     hap_unlock(d);
@@ -555,7 +555,7 @@ int hap_domctl(struct domain *d, xen_dom
         hap_unlock(d);
         if ( preempted )
             /* Not finished.  Set up to re-run the call. */
-            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h", 
+            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
                                                u_domctl);
         else
             /* Finished.  Return the new allocation */
@@ -578,11 +578,11 @@ void hap_vcpu_init(struct vcpu *v)
 /************************************************/
 /*          HAP PAGING MODE FUNCTIONS           */
 /************************************************/
-/* 
+/*
  * HAP guests can handle page faults (in the guest page tables) without
  * needing any action from Xen, so we should not be intercepting them.
  */
-static int hap_page_fault(struct vcpu *v, unsigned long va, 
+static int hap_page_fault(struct vcpu *v, unsigned long va,
                           struct cpu_user_regs *regs)
 {
     HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n",
@@ -591,9 +591,9 @@ static int hap_page_fault(struct vcpu *v
     return 0;
 }
 
-/* 
+/*
  * HAP guests can handle invlpg without needing any action from Xen, so
- * should not be intercepting it. 
+ * should not be intercepting it.
  */
 static int hap_invlpg(struct vcpu *v, unsigned long va)
 {
@@ -649,7 +649,7 @@ static void hap_update_paging_modes(stru
 }
 
 #if CONFIG_PAGING_LEVELS == 3
-static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e) 
+static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e)
 /* Special case, only used for PAE hosts: update the mapping of the p2m
  * table.  This is trivial in other paging modes (one top-level entry
  * points to the top-level p2m, no maintenance needed), but PAE makes
@@ -660,13 +660,13 @@ static void p2m_install_entry_in_monitor
     l2_pgentry_t *ml2e;
     struct vcpu *v;
     unsigned int index;
-    
+
     index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
     ASSERT(index < MACHPHYS_MBYTES>>1);
-    
+
     for_each_vcpu ( d, v )
     {
-        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 ) 
+        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
             continue;
 
         ASSERT(paging_mode_external(v->domain));
@@ -689,7 +689,7 @@ static void p2m_install_entry_in_monitor
 }
 #endif
 
-static void 
+static void
 hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
                     mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
 {
@@ -698,12 +698,12 @@ hap_write_p2m_entry(struct vcpu *v, unsi
     safe_write_pte(p, new);
 #if CONFIG_PAGING_LEVELS == 3
     /* install P2M in monitor table for PAE Xen */
-    if ( level == 3 ) 
+    if ( level == 3 )
         /* We have written to the p2m l3: need to sync the per-vcpu
          * copies of it in the monitor tables */
         p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
 #endif
-    
+
     hap_unlock(v->domain);
 }
 
@@ -715,7 +715,7 @@ static unsigned long hap_gva_to_gfn_real
 
 /* Entry points into this mode of the hap code. */
 struct paging_mode hap_paging_real_mode = {
-    .page_fault             = hap_page_fault, 
+    .page_fault             = hap_page_fault,
     .invlpg                 = hap_invlpg,
     .gva_to_gfn             = hap_gva_to_gfn_real_mode,
     .update_cr3             = hap_update_cr3,
@@ -725,7 +725,7 @@ struct paging_mode hap_paging_real_mode 
 };
 
 struct paging_mode hap_paging_protected_mode = {
-    .page_fault             = hap_page_fault, 
+    .page_fault             = hap_page_fault,
     .invlpg                 = hap_invlpg,
     .gva_to_gfn             = hap_gva_to_gfn_2level,
     .update_cr3             = hap_update_cr3,
@@ -735,7 +735,7 @@ struct paging_mode hap_paging_protected_
 };
 
 struct paging_mode hap_paging_pae_mode = {
-    .page_fault             = hap_page_fault, 
+    .page_fault             = hap_page_fault,
     .invlpg                 = hap_invlpg,
     .gva_to_gfn             = hap_gva_to_gfn_3level,
     .update_cr3             = hap_update_cr3,
@@ -745,7 +745,7 @@ struct paging_mode hap_paging_pae_mode =
 };
 
 struct paging_mode hap_paging_long_mode = {
-    .page_fault             = hap_page_fault, 
+    .page_fault             = hap_page_fault,
     .invlpg                 = hap_invlpg,
     .gva_to_gfn             = hap_gva_to_gfn_4level,
     .update_cr3             = hap_update_cr3,
diff -r ff2dae3ebb1d -r 7953164cebb6 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Aug 07 09:06:38 2007 +0100
+++ b/xen/arch/x86/mm/p2m.c     Tue Aug 07 09:07:29 2007 +0100
@@ -2,12 +2,12 @@
  * arch/x86/mm/p2m.c
  *
  * physical-to-machine mappings for automatically-translated domains.
- * 
+ *
  * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
  * Parts of this code are Copyright (c) 2006 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
  * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -34,7 +34,7 @@
 
 /*
  * The P2M lock.  This protects all updates to the p2m table.
- * Updates are expected to be safe against concurrent reads, 
+ * Updates are expected to be safe against concurrent reads,
  * which do *not* require the lock.
  *
  * Locking discipline: always acquire this lock before the shadow or HAP one
@@ -80,7 +80,7 @@
 #define P2M_DEBUG(_f, _a...)                                 \
     debugtrace_printk("p2mdebug: %s(): " _f, __func__, ##_a)
 #else
-#define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0) 
+#define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0)
 #endif
 
 
@@ -119,8 +119,8 @@ p2m_find_entry(void *table, unsigned lon
 // Returns 0 on error.
 //
 static int
-p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table, 
-               unsigned long *gfn_remainder, unsigned long gfn, u32 shift, 
+p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
+               unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
                u32 max, unsigned long type)
 {
     l1_pgentry_t *p2m_entry;
@@ -146,7 +146,7 @@ p2m_next_level(struct domain *d, mfn_t *
 
         switch ( type ) {
         case PGT_l3_page_table:
-            paging_write_p2m_entry(d, gfn, 
+            paging_write_p2m_entry(d, gfn,
                                    p2m_entry, *table_mfn, new_entry, 4);
             break;
         case PGT_l2_page_table:
@@ -154,11 +154,11 @@ p2m_next_level(struct domain *d, mfn_t *
             /* for PAE mode, PDPE only has PCD/PWT/P bits available */
             new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
 #endif
-            paging_write_p2m_entry(d, gfn, 
+            paging_write_p2m_entry(d, gfn,
                                    p2m_entry, *table_mfn, new_entry, 3);
             break;
         case PGT_l1_page_table:
-            paging_write_p2m_entry(d, gfn, 
+            paging_write_p2m_entry(d, gfn,
                                    p2m_entry, *table_mfn, new_entry, 2);
             break;
         default:
@@ -216,7 +216,7 @@ set_p2m_entry(struct domain *d, unsigned
     ASSERT(p2m_entry);
 
     /* Track the highest gfn for which we have ever had a valid mapping */
-    if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) ) 
+    if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) )
         d->arch.p2m.max_mapped_pfn = gfn;
 
     if ( mfn_valid(mfn) )
@@ -229,7 +229,7 @@ set_p2m_entry(struct domain *d, unsigned
 
     /* Success */
     rv = 1;
- 
+
  out:
     unmap_domain_page(table);
     return rv;
@@ -250,7 +250,7 @@ void p2m_init(struct domain *d)
 // controlled by CONFIG_PAGING_LEVELS).
 //
 // The alloc_page and free_page functions will be used to get memory to
-// build the p2m, and to release it again at the end of day. 
+// build the p2m, and to release it again at the end of day.
 //
 // Returns 0 for success or -errno.
 //
@@ -264,7 +264,7 @@ int p2m_alloc_table(struct domain *d,
     struct page_info *page, *p2m_top;
     unsigned int page_count = 0;
     unsigned long gfn;
-    
+
     p2m_lock(d);
 
     if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
@@ -288,7 +288,7 @@ int p2m_alloc_table(struct domain *d,
     list_add_tail(&p2m_top->list, &d->arch.p2m.pages);
 
     p2m_top->count_info = 1;
-    p2m_top->u.inuse.type_info = 
+    p2m_top->u.inuse.type_info =
 #if CONFIG_PAGING_LEVELS == 4
         PGT_l4_page_table
 #elif CONFIG_PAGING_LEVELS == 3
@@ -301,7 +301,7 @@ int p2m_alloc_table(struct domain *d,
     d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
 
     P2M_PRINTK("populating p2m table\n");
- 
+
     /* Initialise physmap tables for slot zero. Other code assumes this. */
     gfn = 0;
     mfn = _mfn(INVALID_MFN);
@@ -365,17 +365,17 @@ gfn_to_mfn_foreign(struct domain *d, uns
     paddr_t addr = ((paddr_t)gpfn) << PAGE_SHIFT;
     l2_pgentry_t *l2e;
     l1_pgentry_t *l1e;
-    
+
     ASSERT(paging_mode_translate(d));
     mfn = pagetable_get_mfn(d->arch.phys_table);
 
 
-    if ( gpfn > d->arch.p2m.max_mapped_pfn ) 
+    if ( gpfn > d->arch.p2m.max_mapped_pfn )
         /* This pfn is higher than the highest the p2m map currently holds */
         return _mfn(INVALID_MFN);
 
 #if CONFIG_PAGING_LEVELS >= 4
-    { 
+    {
         l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
         l4e += l4_table_offset(addr);
         if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
@@ -398,7 +398,7 @@ gfn_to_mfn_foreign(struct domain *d, uns
          * the bounds of the p2m. */
         l3e += (addr >> L3_PAGETABLE_SHIFT);
 #else
-        l3e += l3_table_offset(addr);        
+        l3e += l3_table_offset(addr);
 #endif
         if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
         {
@@ -443,18 +443,18 @@ static void audit_p2m(struct domain *d)
     mfn_t p2mfn;
     unsigned long orphans_d = 0, orphans_i = 0, mpbad = 0, pmbad = 0;
     int test_linear;
-    
+
     if ( !paging_mode_translate(d) )
         return;
 
     //P2M_PRINTK("p2m audit starts\n");
 
-    test_linear = ( (d == current->domain) 
+    test_linear = ( (d == current->domain)
                     && !pagetable_is_null(current->arch.monitor_table) );
     if ( test_linear )
-        local_flush_tlb(); 
-
-    /* Audit part one: walk the domain's page allocation list, checking 
+        local_flush_tlb();
+
+    /* Audit part one: walk the domain's page allocation list, checking
      * the m2p entries. */
     for ( entry = d->page_list.next;
           entry != &d->page_list;
@@ -463,11 +463,11 @@ static void audit_p2m(struct domain *d)
         page = list_entry(entry, struct page_info, list);
         mfn = mfn_x(page_to_mfn(page));
 
-        // P2M_PRINTK("auditing guest page, mfn=%#lx\n", mfn); 
+        // P2M_PRINTK("auditing guest page, mfn=%#lx\n", mfn);
 
         od = page_get_owner(page);
 
-        if ( od != d ) 
+        if ( od != d )
         {
             P2M_PRINTK("wrong owner %#lx -> %p(%u) != %p(%u)\n",
                        mfn, od, (od?od->domain_id:-1), d, d->domain_id);
@@ -475,19 +475,19 @@ static void audit_p2m(struct domain *d)
         }
 
         gfn = get_gpfn_from_mfn(mfn);
-        if ( gfn == INVALID_M2P_ENTRY ) 
+        if ( gfn == INVALID_M2P_ENTRY )
         {
             orphans_i++;
             //P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n",
-            //               mfn); 
+            //               mfn);
             continue;
         }
 
-        if ( gfn == 0x55555555 ) 
+        if ( gfn == 0x55555555 )
         {
             orphans_d++;
-            //P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n", 
-            //               mfn); 
+            //P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n",
+            //               mfn);
             continue;
         }
 
@@ -503,7 +503,7 @@ static void audit_p2m(struct domain *d)
                         : -1u));
             /* This m2p entry is stale: the domain has another frame in
              * this physical slot.  No great disaster, but for neatness,
-             * blow away the m2p entry. */ 
+             * blow away the m2p entry. */
             set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY, __PAGE_HYPERVISOR|_PAGE_USER);
         }
 
@@ -517,9 +517,9 @@ static void audit_p2m(struct domain *d)
             }
         }
 
-        // P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx, lp2mfn=%#lx\n", 
-        //                mfn, gfn, p2mfn, lp2mfn); 
-    }   
+        // P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx, lp2mfn=%#lx\n",
+        //                mfn, gfn, p2mfn, lp2mfn);
+    }
 
     /* Audit part two: walk the domain's p2m table, checking the entries. */
     if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
@@ -527,7 +527,7 @@ static void audit_p2m(struct domain *d)
         l2_pgentry_t *l2e;
         l1_pgentry_t *l1e;
         int i1, i2;
-        
+
 #if CONFIG_PAGING_LEVELS == 4
         l4_pgentry_t *l4e;
         l3_pgentry_t *l3e;
@@ -553,8 +553,8 @@ static void audit_p2m(struct domain *d)
             }
             l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
 #endif /* now at levels 3 or 4... */
-            for ( i3 = 0; 
-                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8); 
+            for ( i3 = 0;
+                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
                   i3++ )
             {
                 if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
@@ -572,7 +572,7 @@ static void audit_p2m(struct domain *d)
                         continue;
                     }
                     l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));
-                    
+
                     for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                     {
                         if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
@@ -610,14 +610,14 @@ static void audit_p2m(struct domain *d)
     }
 
     //P2M_PRINTK("p2m audit complete\n");
-    //if ( orphans_i | orphans_d | mpbad | pmbad ) 
+    //if ( orphans_i | orphans_d | mpbad | pmbad )
     //    P2M_PRINTK("p2m audit found %lu orphans (%lu inval %lu debug)\n",
     //                   orphans_i + orphans_d, orphans_i, orphans_d,
-    if ( mpbad | pmbad ) 
+    if ( mpbad | pmbad )
         P2M_PRINTK("p2m audit found %lu odd p2m, %lu bad m2p entries\n",
                    pmbad, mpbad);
 }
-#else 
+#else
 #define audit_p2m(_d) do { (void)(_d); } while(0)
 #endif /* P2M_AUDIT */
 
@@ -645,7 +645,7 @@ guest_physmap_remove_page(struct domain 
     audit_p2m(d);
     p2m_remove_page(d, gfn, mfn);
     audit_p2m(d);
-    p2m_unlock(d);    
+    p2m_unlock(d);
 }
 
 void
@@ -683,11 +683,11 @@ guest_physmap_add_page(struct domain *d,
         /* This machine frame is already mapped at another physical address */
         P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                   mfn, ogfn, gfn);
-        if ( mfn_valid(omfn = gfn_to_mfn(d, ogfn)) ) 
-        {
-            P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n", 
+        if ( mfn_valid(omfn = gfn_to_mfn(d, ogfn)) )
+        {
+            P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
                       ogfn , mfn_x(omfn));
-            if ( mfn_x(omfn) == mfn ) 
+            if ( mfn_x(omfn) == mfn )
                 p2m_remove_page(d, ogfn, mfn);
         }
     }
@@ -720,15 +720,15 @@ void p2m_set_flags_global(struct domain 
     int i4;
 #endif /* CONFIG_PAGING_LEVELS == 4 */
 #endif /* CONFIG_PAGING_LEVELS >= 3 */
-    
+
     if ( !paging_mode_translate(d) )
         return;
- 
+
     if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
         return;
 
     p2m_lock(d);
-        
+
 #if CONFIG_PAGING_LEVELS == 4
     l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
 #elif CONFIG_PAGING_LEVELS == 3
@@ -739,52 +739,52 @@ void p2m_set_flags_global(struct domain 
 
 #if CONFIG_PAGING_LEVELS >= 3
 #if CONFIG_PAGING_LEVELS >= 4
-    for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ ) 
-    {
-       if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
-       {
-           continue;
-       }
-       l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
+    for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
+    {
+        if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
+        {
+            continue;
+        }
+        l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
 #endif /* now at levels 3 or 4... */
-       for ( i3 = 0; 
-             i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8); 
-             i3++ )
-       {
-           if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
-           {
-               continue;
-           }
-           l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
+        for ( i3 = 0;
+              i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
+              i3++ )
+        {
+            if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
+            {
+                continue;
+            }
+            l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
 #endif /* all levels... */
-           for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
-           {
-               if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
-               {
-                   continue;
-               }
+            for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
+            {
+                if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
+                {
+                    continue;
+                }
 
                 l1mfn = _mfn(l2e_get_pfn(l2e[i2]));
-               l1e = map_domain_page(mfn_x(l1mfn));
-               
-               for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
-               {
-                   if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
-                       continue;
-                   mfn = l1e_get_pfn(l1e[i1]);
-                   gfn = get_gpfn_from_mfn(mfn);
-                   /* create a new 1le entry using l1e_flags */
-                   l1e_content = l1e_from_pfn(mfn, l1e_flags);
-                   paging_write_p2m_entry(d, gfn, &l1e[i1], 
+                l1e = map_domain_page(mfn_x(l1mfn));
+
+                for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
+                {
+                    if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
+                        continue;
+                    mfn = l1e_get_pfn(l1e[i1]);
+                    gfn = get_gpfn_from_mfn(mfn);
+                    /* create a new 1le entry using l1e_flags */
+                    l1e_content = l1e_from_pfn(mfn, l1e_flags);
+                    paging_write_p2m_entry(d, gfn, &l1e[i1],
                                            l1mfn, l1e_content, 1);
-               }
-               unmap_domain_page(l1e);
-           }
+                }
+                unmap_domain_page(l1e);
+            }
 #if CONFIG_PAGING_LEVELS >= 3
-           unmap_domain_page(l2e);
-       }
+            unmap_domain_page(l2e);
+        }
 #if CONFIG_PAGING_LEVELS >= 4
-       unmap_domain_page(l3e);
+        unmap_domain_page(l3e);
     }
 #endif
 #endif
@@ -814,7 +814,7 @@ int p2m_set_flags(struct domain *d, padd
     mfn = gfn_to_mfn(d, gfn);
     if ( mfn_valid(mfn) )
         set_p2m_entry(d, gfn, mfn, l1e_flags);
-    
+
     p2m_unlock(d);
 
     return 1;
diff -r ff2dae3ebb1d -r 7953164cebb6 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Tue Aug 07 09:06:38 2007 +0100
+++ b/xen/arch/x86/mm/paging.c  Tue Aug 07 09:07:29 2007 +0100
@@ -54,10 +54,10 @@ boolean_param("hap", opt_hap_enabled);
 #define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
 
 /* The log-dirty lock.  This protects the log-dirty bitmap from
- * concurrent accesses (and teardowns, etc). 
- * 
+ * concurrent accesses (and teardowns, etc).
+ *
  * Locking discipline: always acquire shadow or HAP lock before this one.
- * 
+ *
  * Because mark_dirty is called from a lot of places, the log-dirty lock
  * may be acquired with the shadow or HAP locks already held.  When the
  * log-dirty code makes callbacks into HAP or shadow code to reset
@@ -105,7 +105,7 @@ int paging_alloc_log_dirty_bitmap(struct
 
     d->arch.paging.log_dirty.bitmap_size =
         (domain_get_maximum_gpfn(d) + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
-    d->arch.paging.log_dirty.bitmap = 
+    d->arch.paging.log_dirty.bitmap =
         xmalloc_array(unsigned long,
                       d->arch.paging.log_dirty.bitmap_size / BITS_PER_LONG);
     if ( d->arch.paging.log_dirty.bitmap == NULL )
@@ -152,8 +152,8 @@ int paging_log_dirty_enable(struct domai
 
     log_dirty_unlock(d);
 
-    /* Safe because the domain is paused. */    
-    ret = d->arch.paging.log_dirty.enable_log_dirty(d);    
+    /* Safe because the domain is paused. */
+    ret = d->arch.paging.log_dirty.enable_log_dirty(d);
 
     /* Possibility of leaving the bitmap allocated here but it'll be
      * tidied on domain teardown. */
@@ -202,7 +202,7 @@ void paging_mark_dirty(struct domain *d,
     pfn = get_gpfn_from_mfn(mfn_x(gmfn));
 
     /*
-     * Values with the MSB set denote MFNs that aren't really part of the 
+     * Values with the MSB set denote MFNs that aren't really part of the
      * domain's pseudo-physical memory map (e.g., the shared info frame).
      * Nothing to do here...
      */
@@ -212,11 +212,11 @@ void paging_mark_dirty(struct domain *d,
         return;
     }
 
-    if ( likely(pfn < d->arch.paging.log_dirty.bitmap_size) ) 
-    { 
+    if ( likely(pfn < d->arch.paging.log_dirty.bitmap_size) )
+    {
         if ( !__test_and_set_bit(pfn, d->arch.paging.log_dirty.bitmap) )
         {
-            PAGING_DEBUG(LOGDIRTY, 
+            PAGING_DEBUG(LOGDIRTY,
                          "marked mfn %" PRI_mfn " (pfn=%lx), dom %d\n",
                          mfn_x(gmfn), pfn, d->domain_id);
             d->arch.paging.log_dirty.dirty_count++;
@@ -227,21 +227,21 @@ void paging_mark_dirty(struct domain *d,
         PAGING_PRINTK("mark_dirty OOR! "
                       "mfn=%" PRI_mfn " pfn=%lx max=%x (dom %d)\n"
                       "owner=%d c=%08x t=%" PRtype_info "\n",
-                      mfn_x(gmfn), 
-                      pfn, 
+                      mfn_x(gmfn),
+                      pfn,
                       d->arch.paging.log_dirty.bitmap_size,
                       d->domain_id,
                       (page_get_owner(mfn_to_page(gmfn))
                        ? page_get_owner(mfn_to_page(gmfn))->domain_id
                        : -1),
-                      mfn_to_page(gmfn)->count_info, 
+                      mfn_to_page(gmfn)->count_info,
                       mfn_to_page(gmfn)->u.inuse.type_info);
     }
-    
-    log_dirty_unlock(d);
-}
-
-/* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN, 
+
+    log_dirty_unlock(d);
+}
+
+/* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
  * clear the bitmap and stats as well. */
 int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
 {
@@ -252,15 +252,15 @@ int paging_log_dirty_op(struct domain *d
 
     clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
 
-    PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n", 
+    PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
                  (clean) ? "clean" : "peek",
                  d->domain_id,
-                 d->arch.paging.log_dirty.fault_count, 
+                 d->arch.paging.log_dirty.fault_count,
                  d->arch.paging.log_dirty.dirty_count);
 
     sc->stats.fault_count = d->arch.paging.log_dirty.fault_count;
     sc->stats.dirty_count = d->arch.paging.log_dirty.dirty_count;
-    
+
     if ( clean )
     {
         d->arch.paging.log_dirty.fault_count = 0;
@@ -276,7 +276,7 @@ int paging_log_dirty_op(struct domain *d
         rv = -EINVAL; /* perhaps should be ENOMEM? */
         goto out;
     }
- 
+
     if ( sc->pages > d->arch.paging.log_dirty.bitmap_size )
         sc->pages = d->arch.paging.log_dirty.bitmap_size;
 
@@ -322,11 +322,11 @@ int paging_log_dirty_op(struct domain *d
 
 
 /* Note that this function takes three function pointers. Callers must supply
- * these functions for log dirty code to call. This function usually is 
- * invoked when paging is enabled. Check shadow_enable() and hap_enable() for 
+ * these functions for log dirty code to call. This function usually is
+ * invoked when paging is enabled. Check shadow_enable() and hap_enable() for
  * reference.
  *
- * These function pointers must not be followed with the log-dirty lock held. 
+ * These function pointers must not be followed with the log-dirty lock held.
  */
 void paging_log_dirty_init(struct domain *d,
                            int    (*enable_log_dirty)(struct domain *d),
@@ -335,7 +335,7 @@ void paging_log_dirty_init(struct domain
 {
     /* We initialize log dirty lock first */
     log_dirty_lock_init(d);
-    
+
     d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
     d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
     d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
@@ -387,7 +387,7 @@ int paging_domctl(struct domain *d, xen_
                  d->domain_id);
         return -EINVAL;
     }
-    
+
     if ( unlikely(d->is_dying) )
     {
         gdprintk(XENLOG_INFO, "Ignoring paging op on dying domain %u\n",
@@ -401,38 +401,38 @@ int paging_domctl(struct domain *d, xen_
                      d->domain_id);
         return -EINVAL;
     }
-    
+
     /* Code to handle log-dirty. Note that some log dirty operations
-     * piggy-back on shadow operations. For example, when 
+     * piggy-back on shadow operations. For example, when
      * XEN_DOMCTL_SHADOW_OP_OFF is called, it first checks whether log dirty
-     * mode is enabled. If does, we disables log dirty and continues with 
-     * shadow code. For this reason, we need to further dispatch domctl 
+     * mode is enabled. If does, we disables log dirty and continues with
+     * shadow code. For this reason, we need to further dispatch domctl
      * to next-level paging code (shadow or hap).
      */
     switch ( sc->op )
     {
     case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
-        return paging_log_dirty_enable(d);     
-       
-    case XEN_DOMCTL_SHADOW_OP_ENABLE:  
+        return paging_log_dirty_enable(d);
+
+    case XEN_DOMCTL_SHADOW_OP_ENABLE:
         if ( sc->mode & XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY )
             return paging_log_dirty_enable(d);
 
     case XEN_DOMCTL_SHADOW_OP_OFF:
         if ( paging_mode_log_dirty(d) )
-            if ( (rc = paging_log_dirty_disable(d)) != 0 ) 
+            if ( (rc = paging_log_dirty_disable(d)) != 0 )
                 return rc;
 
     case XEN_DOMCTL_SHADOW_OP_CLEAN:
     case XEN_DOMCTL_SHADOW_OP_PEEK:
-       return paging_log_dirty_op(d, sc);
-    }
-       
+        return paging_log_dirty_op(d, sc);
+    }
+
     /* Here, dispatch domctl to the appropriate paging code */
     if ( opt_hap_enabled && is_hvm_domain(d) )
-       return hap_domctl(d, sc, u_domctl);
-    else
-       return shadow_domctl(d, sc, u_domctl);
+        return hap_domctl(d, sc, u_domctl);
+    else
+        return shadow_domctl(d, sc, u_domctl);
 }
 
 /* Call when destroying a domain */
@@ -492,7 +492,7 @@ void paging_dump_vcpu_info(struct vcpu *
 {
     if ( paging_mode_enabled(v->domain) )
     {
-        printk("    paging assistance: ");        
+        printk("    paging assistance: ");
         if ( paging_mode_shadow(v->domain) )
         {
             if ( v->arch.paging.mode )
@@ -504,7 +504,7 @@ void paging_dump_vcpu_info(struct vcpu *
                 printk("not shadowed\n");
         }
         else if ( paging_mode_hap(v->domain) && v->arch.paging.mode )
-            printk("hap, %u levels\n", 
+            printk("hap, %u levels\n",
                    v->arch.paging.mode->guest_levels);
         else
             printk("none\n");
diff -r ff2dae3ebb1d -r 7953164cebb6 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Tue Aug 07 09:06:38 2007 +0100
+++ b/xen/include/asm-x86/domain.h      Tue Aug 07 09:07:29 2007 +0100
@@ -77,10 +77,10 @@ struct shadow_domain {
     int               locker; /* processor which holds the lock */
     const char       *locker_function; /* Func that took it */
     unsigned int      opt_flags;    /* runtime tunable optimizations on/off */
-    struct list_head  pinned_shadows; 
+    struct list_head  pinned_shadows;
 
     /* Memory allocation */
-    struct list_head  freelists[SHADOW_MAX_ORDER + 1]; 
+    struct list_head  freelists[SHADOW_MAX_ORDER + 1];
     struct list_head  p2m_freelist;
     unsigned int      total_pages;  /* number of pages allocated */
     unsigned int      free_pages;   /* number of pages on freelists */
@@ -116,7 +116,7 @@ struct hap_domain {
     spinlock_t        lock;
     int               locker;
     const char       *locker_function;
-    
+
     struct list_head  freelist;
     unsigned int      total_pages;  /* number of pages allocated */
     unsigned int      free_pages;   /* number of pages on freelists */
@@ -131,13 +131,13 @@ struct p2m_domain {
     spinlock_t         lock;
     int                locker;   /* processor which holds the lock */
     const char        *locker_function; /* Func that took it */
-    
+
     /* Pages used to construct the p2m */
     struct list_head   pages;
 
     /* Functions to call to get or free pages for the p2m */
     struct page_info * (*alloc_page  )(struct domain *d);
-    void               (*free_page   )(struct domain *d, 
+    void               (*free_page   )(struct domain *d,
                                        struct page_info *pg);
 
     /* Highest guest frame that's ever been mapped in the p2m */
@@ -177,6 +177,7 @@ struct paging_domain {
     /* log dirty support */
     struct log_dirty_domain log_dirty;
 };
+
 struct paging_vcpu {
     /* Pointers to mode-specific entry points. */
     struct paging_mode *mode;
@@ -184,9 +185,9 @@ struct paging_vcpu {
     unsigned int translate_enabled:1;
     /* HVM guest: last emulate was to a pagetable */
     unsigned int last_write_was_pt:1;
-    /* Translated guest: virtual TLB */    
+    /* Translated guest: virtual TLB */
     struct shadow_vtlb *vtlb;
-    spinlock_t          vtlb_lock; 
+    spinlock_t          vtlb_lock;
 
     /* paging support extension */
     struct shadow_vcpu shadow;
@@ -303,7 +304,7 @@ struct arch_vcpu
      * shadow refcounts are in use */
     pagetable_t shadow_table[4];        /* (MFN) shadow(s) of guest */
     pagetable_t monitor_table;          /* (MFN) hypervisor PT (for HVM) */
-    unsigned long cr3;                     /* (MA) value to install in HW CR3 */
+    unsigned long cr3;                  /* (MA) value to install in HW CR3 */
 
     /* Current LDT details. */
     unsigned long shadow_ldt_mapcnt;
diff -r ff2dae3ebb1d -r 7953164cebb6 xen/include/asm-x86/hap.h
--- a/xen/include/asm-x86/hap.h Tue Aug 07 09:06:38 2007 +0100
+++ b/xen/include/asm-x86/hap.h Tue Aug 07 09:07:29 2007 +0100
@@ -3,7 +3,7 @@
  *
  * hardware-assisted paging
  * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
- * 
+ *
  * Parts of this code are Copyright (c) 2006 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
  * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
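
For anyone wanting to catch this class of problem before it lands, a minimal
sketch of a checker follows. It is not part of this changeset; the file name
whitespace_check.c and the fixed 1024-byte line buffer are just illustrative
choices. It reports hard tabs and trailing whitespace, the two things this
patch removes from the paging code.

/* whitespace_check.c: report hard tabs and trailing whitespace.
 * Illustrative sketch only; lines longer than the buffer are read in
 * several fgets() chunks, which is good enough for this purpose. */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    FILE *f;
    char buf[1024];
    unsigned long lineno = 0;

    if ( argc != 2 || (f = fopen(argv[1], "r")) == NULL )
    {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }

    while ( fgets(buf, sizeof(buf), f) != NULL )
    {
        /* Length of the line excluding the terminating newline. */
        size_t len = strcspn(buf, "\n");

        lineno++;
        if ( memchr(buf, '\t', len) != NULL )
            printf("%s:%lu: hard tab\n", argv[1], lineno);
        if ( len > 0 && (buf[len - 1] == ' ' || buf[len - 1] == '\t') )
            printf("%s:%lu: trailing whitespace\n", argv[1], lineno);
    }

    fclose(f);
    return 0;
}

Running it over one of the files touched here, e.g.
"./whitespace_check xen/arch/x86/mm/p2m.c", lists the offending lines.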

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
