
[Xen-changelog] Unify access to mpt using macros.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 551870a55f240791695d30fd7fa92a1bf4e48387
# Parent  1b9f23175fa8f37814a4949aadac1d680004a925
Unify access to mpt using macros.
Also some code cleanup to x86_64 fault.c

Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
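
For reference, "mpt" is the machine-to-physical table: the machine_to_phys_mapping
array that lives at RDWR_MPT_VIRT_START. The patch replaces open-coded accesses to
that table (and to its reverse, phys_to_machine_mapping) with the accessor macros
added to xen/include/asm-x86/mm.h below. A minimal sketch of the resulting pattern,
using only names defined in the patch (illustrative, not part of the changeset):

    /* New accessors, as defined in the asm-x86/mm.h hunk below. */
    #define set_pfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
    #define get_pfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
    #define set_mfn_from_pfn(pfn, mfn) (phys_to_machine_mapping[(pfn)] = (mfn))

    /* Call sites change mechanically, e.g.:
     *   machine_to_phys_mapping[mfn] = pfn;   becomes  set_pfn_from_mfn(mfn, pfn);
     *   pfn = machine_to_phys_mapping[mfn];   becomes  pfn = get_pfn_from_mfn(mfn);
     *   mfn = phys_to_machine_mapping(gpfn);  becomes  mfn = get_mfn_from_pfn(gpfn);
     */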

diff -r 1b9f23175fa8 -r 551870a55f24 linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c   Tue Aug 30 17:09:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c   Tue Aug 30 17:53:49 2005
@@ -149,7 +149,7 @@
        pmd_t *pmd;
        pte_t *pte;
 
-        pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
+       pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
        pgd += pgd_index(address);
 
        printk("PGD %lx ", pgd_val(*pgd));
@@ -296,9 +296,9 @@
 #define MEM_VERBOSE 1
 
 #ifdef MEM_VERBOSE
-#define MEM_LOG(_f, _a...)                           \
-  printk("fault.c:[%d]-> " _f "\n", \
-          __LINE__ , ## _a )
+#define MEM_LOG(_f, _a...)                     \
+       printk("fault.c:[%d]-> " _f "\n",       \
+       __LINE__ , ## _a )
 #else
 #define MEM_LOG(_f, _a...) ((void)0)
 #endif
@@ -325,7 +325,7 @@
        siginfo_t info;
 
        if (!user_mode(regs))
-                error_code &= ~4; /* means kernel */
+               error_code &= ~4; /* means kernel */
 
 #ifdef CONFIG_CHECKING
        { 
diff -r 1b9f23175fa8 -r 551870a55f24 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Aug 30 17:09:43 2005
+++ b/xen/arch/x86/domain.c     Tue Aug 30 17:53:49 2005
@@ -255,13 +255,13 @@
     v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
     v->cpumap = CPUMAP_RUNANYWHERE;
     SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
-    machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
-                           PAGE_SHIFT] = INVALID_M2P_ENTRY;
+    set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
+            INVALID_M2P_ENTRY);
     
     d->arch.mm_perdomain_pt = alloc_xenheap_page();
     memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
-    machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >> 
-                           PAGE_SHIFT] = INVALID_M2P_ENTRY;
+    set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT,
+            INVALID_M2P_ENTRY);
     v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
     v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
         l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
diff -r 1b9f23175fa8 -r 551870a55f24 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Tue Aug 30 17:09:43 2005
+++ b/xen/arch/x86/domain_build.c       Tue Aug 30 17:53:49 2005
@@ -592,8 +592,7 @@
     if ( opt_dom0_translate )
     {
         si->shared_info  = d->next_io_page << PAGE_SHIFT;
-        set_machinetophys(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
-                          d->next_io_page);
+        set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT, d->next_io_page);
         d->next_io_page++;
     }
     else
@@ -614,7 +613,7 @@
             mfn = alloc_epfn - (pfn - REVERSE_START);
 #endif
         ((u32 *)vphysmap_start)[pfn] = mfn;
-        machine_to_phys_mapping[mfn] = pfn;
+        set_pfn_from_mfn(mfn, pfn);
     }
     while ( pfn < nr_pages )
     {
@@ -627,7 +626,7 @@
 #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
 #endif
             ((u32 *)vphysmap_start)[pfn] = mfn;
-            machine_to_phys_mapping[mfn] = pfn;
+            set_pfn_from_mfn(mfn, pfn);
 #undef pfn
             page++; pfn++;
         }
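
The domain_build.c loops above populate both views of the mapping at once: the
guest-visible phys-to-machine array being built at vphysmap_start, and Xen's global
M2P table via the new macro. A sketch of one loop iteration (illustrative only;
all names are from the patch):

    /* Sketch only: record pfn <-> mfn in both directions during dom0 build. */
    ((u32 *)vphysmap_start)[pfn] = mfn;    /* guest's own P2M array  */
    set_pfn_from_mfn(mfn, pfn);            /* Xen's global M2P table */
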
diff -r 1b9f23175fa8 -r 551870a55f24 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Aug 30 17:09:43 2005
+++ b/xen/arch/x86/mm.c Tue Aug 30 17:53:49 2005
@@ -1452,7 +1452,7 @@
                                 "!= exp %" PRtype_info ") "
                                 "for mfn %lx (pfn %x)",
                                 x, type, page_to_pfn(page),
-                                machine_to_phys_mapping[page_to_pfn(page)]);
+                                get_pfn_from_mfn(page_to_pfn(page)));
                     return 0;
                 }
                 else if ( (x & PGT_va_mask) == PGT_va_mutable )
@@ -2206,7 +2206,7 @@
                 printk("privileged guest dom%d requests pfn=%lx to "
                        "map mfn=%lx for dom%d\n",
                        d->domain_id, gpfn, mfn, FOREIGNDOM->domain_id);
-                set_machinetophys(mfn, gpfn);
+                set_pfn_from_mfn(mfn, gpfn);
                 set_p2m_entry(FOREIGNDOM, gpfn, mfn, &sh_mapcache, &mapcache);
                 okay = 1;
                 shadow_unlock(FOREIGNDOM);
@@ -2225,7 +2225,7 @@
                 break;
             }
 
-            set_machinetophys(mfn, gpfn);
+            set_pfn_from_mfn(mfn, gpfn);
             okay = 1;
 
             /*
diff -r 1b9f23175fa8 -r 551870a55f24 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Tue Aug 30 17:09:43 2005
+++ b/xen/arch/x86/shadow32.c   Tue Aug 30 17:53:49 2005
@@ -827,7 +827,7 @@
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         ASSERT(pfn != INVALID_M2P_ENTRY);
         ASSERT(pfn < (1u<<20));
 
@@ -841,7 +841,7 @@
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         if ( (pfn != INVALID_M2P_ENTRY) &&
              (pfn < (1u<<20)) )
         {
diff -r 1b9f23175fa8 -r 551870a55f24 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Tue Aug 30 17:09:43 2005
+++ b/xen/arch/x86/shadow_public.c      Tue Aug 30 17:53:49 2005
@@ -1311,7 +1311,7 @@
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         ASSERT(pfn != INVALID_M2P_ENTRY);
         ASSERT(pfn < (1u<<20));
 
@@ -1325,7 +1325,7 @@
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         if ( (pfn != INVALID_M2P_ENTRY) &&
              (pfn < (1u<<20)) )
         {
diff -r 1b9f23175fa8 -r 551870a55f24 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Tue Aug 30 17:09:43 2005
+++ b/xen/arch/x86/vmx.c        Tue Aug 30 17:53:49 2005
@@ -694,7 +694,7 @@
         return 0;
     }
 
-    mfn = phys_to_machine_mapping(laddr >> PAGE_SHIFT);
+    mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
     addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
 
     if (dir == COPY_IN)
@@ -795,7 +795,7 @@
         * removed some translation or changed page attributes.
         * We simply invalidate the shadow.
         */
-       mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
+       mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
        if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
            printk("Invalid CR3 value=%x", c->cr3);
            domain_crash_synchronous();
@@ -813,7 +813,7 @@
            domain_crash_synchronous(); 
            return 0;
        }
-       mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
+       mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
        d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
        update_pagetables(d);
        /* 
@@ -968,7 +968,7 @@
         /*
          * The guest CR3 must be pointing to the guest physical.
          */
-        if ( !VALID_MFN(mfn = phys_to_machine_mapping(
+        if ( !VALID_MFN(mfn = get_mfn_from_pfn(
                             d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
              !get_page(pfn_to_page(mfn), d->domain) )
         {
@@ -1164,7 +1164,7 @@
              * removed some translation or changed page attributes.
              * We simply invalidate the shadow.
              */
-            mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
+            mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
             if (mfn != pagetable_get_pfn(d->arch.guest_table))
                 __vmx_bug(regs);
             shadow_sync_all(d->domain);
@@ -1175,7 +1175,7 @@
              */
             VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
             if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
-                 !VALID_MFN(mfn = phys_to_machine_mapping(value >> PAGE_SHIFT)) ||
+                 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
                  !get_page(pfn_to_page(mfn), d->domain) )
             {
                 printk("Invalid CR3 value=%lx", value);
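
The vmx.c hunks above all follow the same CR3-load pattern: translate the
guest-physical CR3 frame with the new accessor and refuse anything that does not
map to a real, takeable machine page. A compressed sketch mirroring the hunks
(illustrative only; names are taken from the hunks above):

    mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
    if ( !VALID_MFN(mfn) || !get_page(pfn_to_page(mfn), d->domain) )
    {
        printk("Invalid CR3 value=%lx", value);
        domain_crash_synchronous();
    }
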
diff -r 1b9f23175fa8 -r 551870a55f24 xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       Tue Aug 30 17:09:43 2005
+++ b/xen/arch/x86/vmx_platform.c       Tue Aug 30 17:53:49 2005
@@ -521,7 +521,7 @@
     if ( vmx_paging_enabled(current) )
     {
         gpa = gva_to_gpa(guest_eip);
-        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
 
         /* Does this cross a page boundary ? */
         if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) )
@@ -532,7 +532,7 @@
     }
     else
     {
-        mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(guest_eip >> PAGE_SHIFT);
     }
 
     inst_start = map_domain_page(mfn);
@@ -542,7 +542,7 @@
     if ( remaining )
     {
         gpa = gva_to_gpa(guest_eip+inst_len+remaining);
-        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
 
         inst_start = map_domain_page(mfn);
         memcpy((char *)buf+inst_len, inst_start, remaining);
diff -r 1b9f23175fa8 -r 551870a55f24 xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   Tue Aug 30 17:09:43 2005
+++ b/xen/arch/x86/vmx_vmcs.c   Tue Aug 30 17:53:49 2005
@@ -148,7 +148,7 @@
     offset = (addr & ~PAGE_MASK);
     addr = round_pgdown(addr);
 
-    mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
+    mpfn = get_mfn_from_pfn(addr >> PAGE_SHIFT);
     p = map_domain_page(mpfn);
 
     e820p = (struct e820entry *) ((unsigned long) p + offset); 
@@ -175,7 +175,7 @@
     unmap_domain_page(p);        
 
     /* Initialise shared page */
-    mpfn = phys_to_machine_mapping(gpfn);
+    mpfn = get_mfn_from_pfn(gpfn);
     p = map_domain_page(mpfn);
     d->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;
 
diff -r 1b9f23175fa8 -r 551870a55f24 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Tue Aug 30 17:09:43 2005
+++ b/xen/common/grant_table.c  Tue Aug 30 17:53:49 2005
@@ -1211,13 +1211,13 @@
         DPRINTK("Bad pfn (%lx)\n", pfn);
     else
     {
-        machine_to_phys_mapping[frame] = pfn;
+        set_pfn_from_mfn(frame, pfn);
 
         if ( unlikely(shadow_mode_log_dirty(ld)))
              mark_dirty(ld, frame);
 
         if (shadow_mode_translate(ld))
-            __phys_to_machine_mapping[pfn] = frame;
+            set_mfn_from_pfn(pfn, frame);
     }
     sha->frame = __mfn_to_gpfn(rd, frame);
     sha->domid = rd->domain_id;
@@ -1268,8 +1268,7 @@
     {
         SHARE_PFN_WITH_DOMAIN(
             virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d);
-        machine_to_phys_mapping[(virt_to_phys(t->shared) >> PAGE_SHIFT) + i] =
-            INVALID_M2P_ENTRY;
+        set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i, INVALID_M2P_ENTRY);
     }
 
     /* Okay, install the structure. */
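
The grant_table.c hunk above maintains both directions of the mapping: the M2P
entry is always written, while the P2M entry is only written for translated
guests. A minimal sketch of that pattern with the new accessors (the helper name
and the 'translated' flag are hypothetical stand-ins; shadow_mode_translate(ld)
plays that role in the hunk):

    /* Sketch only: pair a machine frame with a guest pfn. */
    static inline void pair_frame_with_pfn(unsigned long mfn, unsigned long pfn,
                                           int translated)
    {
        set_pfn_from_mfn(mfn, pfn);        /* M2P: always kept current    */
        if ( translated )
            set_mfn_from_pfn(pfn, mfn);    /* P2M: translated guests only */
    }
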
diff -r 1b9f23175fa8 -r 551870a55f24 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Tue Aug 30 17:09:43 2005
+++ b/xen/include/asm-ia64/mm.h Tue Aug 30 17:53:49 2005
@@ -405,7 +405,7 @@
 /* If pmt table is provided by control pannel later, we need __get_user
 * here. However if it's allocated by HV, we should access it directly
 */
-#define phys_to_machine_mapping(d, gpfn)                       \
+#define get_mfn_from_pfn(d, gpfn)                      \
     ((d) == dom0 ? gpfn :                                      \
        (gpfn <= d->arch.max_pfn ? (d)->arch.pmt[(gpfn)] :      \
                INVALID_MFN))
@@ -414,7 +414,7 @@
     machine_to_phys_mapping[(mfn)]
 
 #define __gpfn_to_mfn(_d, gpfn)                        \
-    phys_to_machine_mapping((_d), (gpfn))
+    get_mfn_from_pfn((_d), (gpfn))
 
 #define __gpfn_invalid(_d, gpfn)                       \
        (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)
diff -r 1b9f23175fa8 -r 551870a55f24 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Tue Aug 30 17:09:43 2005
+++ b/xen/include/asm-x86/mm.h  Tue Aug 30 17:53:49 2005
@@ -255,10 +255,13 @@
  * contiguous (or near contiguous) physical memory.
  */
 #undef  machine_to_phys_mapping
-#define machine_to_phys_mapping ((u32 *)RDWR_MPT_VIRT_START)
+#define machine_to_phys_mapping  ((u32 *)RDWR_MPT_VIRT_START)
 #define INVALID_M2P_ENTRY        (~0U)
 #define VALID_M2P(_e)            (!((_e) & (1U<<31)))
 #define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
+
+#define set_pfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
+#define get_pfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
 
 /*
  * The phys_to_machine_mapping is the reversed mapping of MPT for full
@@ -266,17 +269,17 @@
  * guests, so we steal the address space that would have normally
  * been used by the read-only MPT map.
  */
-#define __phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START)
-#define INVALID_MFN               (~0UL)
-#define VALID_MFN(_mfn)           (!((_mfn) & (1U<<31)))
-
-/* Returns the machine physical */
-static inline unsigned long phys_to_machine_mapping(unsigned long pfn) 
+#define phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START)
+#define INVALID_MFN             (~0UL)
+#define VALID_MFN(_mfn)         (!((_mfn) & (1U<<31)))
+
+#define set_mfn_from_pfn(pfn, mfn) (phys_to_machine_mapping[(pfn)] = (mfn))
+static inline unsigned long get_mfn_from_pfn(unsigned long pfn) 
 {
     unsigned long mfn;
     l1_pgentry_t pte;
 
-    if ( (__copy_from_user(&pte, &__phys_to_machine_mapping[pfn],
+    if ( (__copy_from_user(&pte, &phys_to_machine_mapping[pfn],
                            sizeof(pte)) == 0) &&
          (l1e_get_flags(pte) & _PAGE_PRESENT) )
        mfn = l1e_get_pfn(pte);
@@ -285,7 +288,6 @@
     
     return mfn; 
 }
-#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
 
 #ifdef MEMORY_GUARD
 void memguard_init(void);
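
Note the asymmetry the asm-x86/mm.h hunk preserves: get_pfn_from_mfn() is a plain
read of the writable M2P table, while get_mfn_from_pfn() copies the entry with
__copy_from_user() and checks _PAGE_PRESENT, since the P2M lives in the read-only
MPT address space and an entry may simply not be mapped. A hedged sketch of a
round-trip check built from the accessors above (the helper name is hypothetical):

    /* Sketch only: does pfn -> mfn -> pfn come back to where it started? */
    static inline int pfn_roundtrip_ok(unsigned long pfn)
    {
        unsigned long mfn = get_mfn_from_pfn(pfn);

        if ( !VALID_MFN(mfn) )
            return 0;                          /* no machine frame behind pfn */

        return get_pfn_from_mfn(mfn) == pfn;   /* M2P should point back       */
    }
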
diff -r 1b9f23175fa8 -r 551870a55f24 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Tue Aug 30 17:09:43 2005
+++ b/xen/include/asm-x86/shadow.h      Tue Aug 30 17:53:49 2005
@@ -269,14 +269,14 @@
 
 #define __mfn_to_gpfn(_d, mfn)                         \
     ( (shadow_mode_translate(_d))                      \
-      ? machine_to_phys_mapping[(mfn)]                 \
+      ? get_pfn_from_mfn(mfn)                                   \
       : (mfn) )
 
 #define __gpfn_to_mfn(_d, gpfn)                        \
     ({                                                 \
         ASSERT(current->domain == (_d));               \
         (shadow_mode_translate(_d))                    \
-        ? phys_to_machine_mapping(gpfn)                \
+        ? get_mfn_from_pfn(gpfn)                \
         : (gpfn);                                      \
     })
 
@@ -461,7 +461,7 @@
     // This wants the nice compact set of PFNs from 0..domain's max,
     // which __mfn_to_gpfn() only returns for translated domains.
     //
-    pfn = machine_to_phys_mapping[mfn];
+    pfn = get_pfn_from_mfn(mfn);
 
     /*
      * Values with the MSB set denote MFNs that aren't really part of the 
@@ -562,7 +562,7 @@
     old_hl2e = v->arch.hl2_vtable[index];
 
     if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
-         VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) )
+         VALID_MFN(mfn = get_mfn_from_pfn(l2e_get_pfn(gl2e))) )
         new_hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
     else
         new_hl2e = l1e_empty();
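
The shadow.h hunk keeps the existing semantics: for non-translated domains a gpfn
and an mfn are the same number, and the tables are only consulted under
shadow_mode_translate(). Expressed as plain inline helpers (a sketch; the *_sketch
names are hypothetical, everything else is from the patch):

    static inline unsigned long mfn_to_gpfn_sketch(struct domain *d, unsigned long mfn)
    {
        return shadow_mode_translate(d) ? get_pfn_from_mfn(mfn) : mfn;
    }

    static inline unsigned long gpfn_to_mfn_sketch(struct domain *d, unsigned long gpfn)
    {
        return shadow_mode_translate(d) ? get_mfn_from_pfn(gpfn) : gpfn;
    }
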
diff -r 1b9f23175fa8 -r 551870a55f24 xen/include/asm-x86/shadow_64.h
--- a/xen/include/asm-x86/shadow_64.h   Tue Aug 30 17:09:43 2005
+++ b/xen/include/asm-x86/shadow_64.h   Tue Aug 30 17:53:49 2005
@@ -138,7 +138,7 @@
             return NULL;
         mfn = entry_get_value(*le_e) >> PAGE_SHIFT;
         if ((flag & GUEST_ENTRY) && shadow_mode_translate(d))
-            mfn = phys_to_machine_mapping(mfn);
+            mfn = get_mfn_from_pfn(mfn);
         le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
         index = table_offset_64(va, (level + i - 1));
         le_e = &le_p[index];
@@ -257,7 +257,7 @@
                 if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                     return NULL;
 
-                l1mfn = phys_to_machine_mapping(
+                l1mfn = get_mfn_from_pfn(
                   l2e_get_pfn(gl2e));
 
                 l1va = (l1_pgentry_32_t *)
@@ -299,7 +299,7 @@
                     return NULL;
 
 
-                l1mfn = phys_to_machine_mapping(
+                l1mfn = get_mfn_from_pfn(
                   l2e_get_pfn(gl2e));
                 l1va = (l1_pgentry_32_t *) phys_to_virt(
                   l1mfn << L1_PAGETABLE_SHIFT);
diff -r 1b9f23175fa8 -r 551870a55f24 xen/include/asm-x86/vmx_platform.h
--- a/xen/include/asm-x86/vmx_platform.h        Tue Aug 30 17:09:43 2005
+++ b/xen/include/asm-x86/vmx_platform.h        Tue Aug 30 17:53:49 2005
@@ -91,6 +91,6 @@
 extern void vmx_io_assist(struct vcpu *v);
 
 // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
-#define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
+#define mmio_space(gpa) (!VALID_MFN(get_mfn_from_pfn((gpa) >> PAGE_SHIFT)))
 
 #endif

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

