
[Xen-devel] [PATCH,RFC 7/17] 32-on-64 phys2machine handling



Index: 2006-10-04/xen/arch/x86/domain.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/domain.c       2006-10-04 15:11:03.000000000 +0200
+++ 2006-10-04/xen/arch/x86/domain.c    2006-10-04 15:18:36.000000000 +0200
@@ -143,11 +143,34 @@ struct vcpu *alloc_vcpu_struct(struct do
 
     pae_l3_cache_init(&v->arch.pae_l3_cache);
 
+#ifdef CONFIG_COMPAT
+    if ( IS_COMPAT(d) )
+    {
+        struct page_info *pg = alloc_domheap_page(NULL);
+        l4_pgentry_t *l4tab;
+
+        if ( !pg )
+        {
+            xfree(v);
+            return NULL;
+        }
+        l4tab = copy_page(page_to_virt(pg), idle_pg_table);
+        l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
+            l4e_from_page(pg, __PAGE_HYPERVISOR);
+        l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
+            l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
+        v->arch.guest_table = pagetable_from_page(pg);
+        v->arch.guest_table_user = v->arch.guest_table;
+    }
+#endif
+
     return v;
 }
 
 void free_vcpu_struct(struct vcpu *v)
 {
+    if ( IS_COMPAT(v->domain) )
+        free_domheap_page(pagetable_get_page(v->arch.guest_table));
     xfree(v);
 }
 
@@ -323,7 +346,8 @@ int arch_set_info_guest(
     if ( !(c->flags & VGCF_HVM_GUEST) )
     {
         cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c->ctrlreg[3]));
-        v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
+        if ( !IS_COMPAT(d) )
+            v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
     }
 
     if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
@@ -336,7 +360,7 @@ int arch_set_info_guest(
         if ( !hvm_initialize_guest_resources(v) )
             return -EINVAL;
     }
-    else
+    else if ( !IS_COMPAT(d) )
     {
         if ( shadow_mode_refcounts(d)
              ? !get_page(mfn_to_page(cr3_pfn), d)
@@ -346,7 +370,24 @@ int arch_set_info_guest(
             destroy_gdt(v);
             return -EINVAL;
         }
-    }    
+    }
+#ifdef CONFIG_COMPAT
+    else
+    {
+        l4_pgentry_t *l4tab;
+
+        if ( shadow_mode_refcounts(d)
+             ? !get_page(mfn_to_page(cr3_pfn), d)
+             : !get_page_and_type(mfn_to_page(cr3_pfn), d,
+                                PGT_l3_page_table) )
+        {
+            destroy_gdt(v);
+            return -EINVAL;
+        }
+        l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
+        *l4tab = l4e_from_pfn(cr3_pfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY);
+    }
+#endif
 
     /* Shadow: make sure the domain has enough shadow memory to
      * boot another vcpu */
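
A compatibility guest's CR3 names a PAE L3 table, which 64-bit hardware cannot load as a page-table root, so alloc_vcpu_struct() above builds a per-vcpu L4 and arch_set_info_guest() hooks the guest L3 in as its slot 0. A minimal sketch (plain arithmetic, not code from the patch) of why slot 0 is the only L4 slot a 32-bit guest can ever reach:

    /* The L4 index is bits 47:39 of the virtual address; a 32-bit VA
     * has those bits clear, so a compat guest only ever touches slot 0
     * and the remaining 511 slots stay under Xen's control. */
    unsigned int l4_slot(unsigned long va)
    {
        return (va >> 39) & 0x1ff;  /* == 0 for any va < (1UL << 32) */
    }
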
Index: 2006-10-04/xen/arch/x86/domain_build.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/domain_build.c 2006-10-04 15:11:03.000000000 +0200
+++ 2006-10-04/xen/arch/x86/domain_build.c      2006-10-04 15:16:05.000000000 +0200
@@ -85,9 +85,11 @@ string_param("dom0_ioports_disable", opt
 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
 #define L3_PROT (_PAGE_PRESENT)
 #elif defined(__x86_64__)
-/* Allow ring-3 access in long mode as guest cannot use ring 1. */
+/* Allow ring-3 access in long mode as guest cannot use ring 1 ... */
 #define BASE_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
 #define L1_PROT (BASE_PROT|_PAGE_GUEST_KERNEL)
+/* ... except for compatibility mode guests. */
+#define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
 #define L2_PROT (BASE_PROT|_PAGE_DIRTY)
 #define L3_PROT (BASE_PROT|_PAGE_DIRTY)
 #define L4_PROT (BASE_PROT|_PAGE_DIRTY)
@@ -393,7 +395,9 @@ int construct_dom0(struct domain *d,
     vinitrd_start    = round_pgup(dsi.v_end);
     vinitrd_end      = vinitrd_start + initrd_len;
     vphysmap_start   = round_pgup(vinitrd_end);
-    vphysmap_end     = vphysmap_start + (nr_pages * sizeof(unsigned long));
+    vphysmap_end     = vphysmap_start + (nr_pages * (!IS_COMPAT(d) ?
+                                                     sizeof(unsigned long) :
+                                                     sizeof(unsigned int)));
     vstartinfo_start = round_pgup(vphysmap_end);
     vstartinfo_end   = (vstartinfo_start +
                         sizeof(struct start_info) +
@@ -422,7 +426,9 @@ int construct_dom0(struct domain *d,
        ((_l) & ~((1UL<<(_s))-1))) >> (_s))
         if ( (1 + /* # L4 */
               NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
-              NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
+              (!IS_COMPAT(d) ?
+               NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) : /* # L2 */
+               4) + /* # compat L2 */
               NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
              <= nr_pt_pages )
             break;
@@ -624,8 +630,10 @@ int construct_dom0(struct domain *d,
 #elif defined(__x86_64__)
 
     /* Overlap with Xen protected area? */
-    if ( (dsi.v_start < HYPERVISOR_VIRT_END) &&
-         (v_end > HYPERVISOR_VIRT_START) )
+    if ( !IS_COMPAT(d) ?
+         ((dsi.v_start < HYPERVISOR_VIRT_END) &&
+          (v_end > HYPERVISOR_VIRT_START)) :
+         (v_end > HYPERVISOR_COMPAT_VIRT_START) )
     {
         printk("DOM0 image overlaps with Xen private area.\n");
         return -EINVAL;
@@ -638,8 +646,18 @@ int construct_dom0(struct domain *d,
     }
 
     /* WARNING: The new domain must have its 'processor' field filled in! */
-    maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
-    l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+    if ( !IS_COMPAT(d) )
+    {
+        maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
+        l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+    }
+    else
+    {
+        page = alloc_domheap_page(NULL);
+        if ( !page )
+            panic("Not enough RAM for domain 0 PML4.\n");
+        l4start = l4tab = page_to_virt(page);
+    }
     memcpy(l4tab, idle_pg_table, PAGE_SIZE);
     l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
         l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
@@ -684,7 +702,7 @@ int construct_dom0(struct domain *d,
             *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
             l2tab++;
         }
-        *l1tab = l1e_from_pfn(mfn, L1_PROT);
+        *l1tab = l1e_from_pfn(mfn, !IS_COMPAT(d) ? L1_PROT : COMPAT_L1_PROT);
         l1tab++;
 
         page = mfn_to_page(mfn);
@@ -695,6 +713,29 @@ int construct_dom0(struct domain *d,
         mfn++;
     }
 
+    if ( IS_COMPAT(d) )
+    {
+        /* Ensure the first four L3 entries are all populated. */
+        for ( i = 0, l3tab = l3start; i < 4; ++i, ++l3tab )
+        {
+            if ( !l3e_get_intpte(*l3tab) )
+            {
+                maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
+                l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+                clear_page(l2tab);
+                *l3tab = l3e_from_paddr(__pa(l2tab), L3_PROT);
+            }
+            if ( i == 3 )
+                l3e_get_page(*l3tab)->u.inuse.type_info |= PGT_pae_xen_l2;
+        }
+        /* Install read-only guest visible MPT mapping. */
+        l3tab = l3start + l3_table_offset(COMPAT_RO_MPT_VIRT_START);
+        l2tab = l3e_to_l2e(*l3tab);
+        memcpy(&l2tab[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT],
+               &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
+               COMPAT_L2_PAGETABLE_XEN_SLOTS * sizeof(*l2tab));
+    }
+
     /* Pages that are part of page tables must be read only. */
     l4tab = l4start + l4_table_offset(vpt_start);
     l3start = l3tab = l4e_to_l3e(*l4tab);
@@ -713,7 +754,8 @@ int construct_dom0(struct domain *d,
         page->u.inuse.type_info |= PGT_validated | 1;
 
         /* Top-level p.t. is pinned. */
-        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_l4_page_table )
+        if ( (page->u.inuse.type_info & PGT_type_mask) ==
+             (!IS_COMPAT(d) ? PGT_l4_page_table : PGT_l3_page_table) )
         {
             page->count_info        += 1;
             page->u.inuse.type_info += 1 | PGT_pinned;
@@ -800,7 +842,7 @@ int construct_dom0(struct domain *d,
     si->shared_info = virt_to_maddr(d->shared_info);
 
     si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
-    si->pt_base      = vpt_start;
+    si->pt_base      = vpt_start + 2 * PAGE_SIZE * !!IS_COMPAT(d);
     si->nr_pt_frames = nr_pt_pages;
     si->mfn_list     = vphysmap_start;
     sprintf(si->magic, "xen-%i.%i-x86_%d%s",
@@ -816,7 +858,10 @@ int construct_dom0(struct domain *d,
         if ( pfn > REVERSE_START )
             mfn = alloc_epfn - (pfn - REVERSE_START);
 #endif
-        ((unsigned long *)vphysmap_start)[pfn] = mfn;
+        if ( !IS_COMPAT(d) )
+            ((unsigned long *)vphysmap_start)[pfn] = mfn;
+        else
+            ((unsigned int *)vphysmap_start)[pfn] = mfn;
         set_gpfn_from_mfn(mfn, pfn);
     }
     while ( pfn < nr_pages )
@@ -829,7 +874,10 @@ int construct_dom0(struct domain *d,
 #ifndef NDEBUG
 #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
 #endif
-            ((unsigned long *)vphysmap_start)[pfn] = mfn;
+            if ( !IS_COMPAT(d) )
+                ((unsigned long *)vphysmap_start)[pfn] = mfn;
+            else
+                ((unsigned int *)vphysmap_start)[pfn] = mfn;
             set_gpfn_from_mfn(mfn, pfn);
 #undef pfn
             page++; pfn++;
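
The phys-to-machine array built above is written one guest word per entry, which is why both the sizing and the stores switch on IS_COMPAT(d). A sketch of the rule, using nr_pages and IS_COMPAT() as in the patch:

    /* Bytes needed for the guest-visible P2M array: one guest word
     * per page frame. */
    unsigned long p2m_bytes = nr_pages *
        (IS_COMPAT(d) ? sizeof(unsigned int)     /* 4-byte entries */
                      : sizeof(unsigned long));  /* 8-byte entries */
    /* e.g. a 512MB compat dom0 (131072 frames) needs 512KB, half of
     * what the same domain would need as a native 64-bit guest. */
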
Index: 2006-10-04/xen/arch/x86/mm.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/mm.c   2006-10-04 15:03:07.000000000 +0200
+++ 2006-10-04/xen/arch/x86/mm.c        2006-10-04 15:16:05.000000000 +0200
@@ -132,13 +132,6 @@
  */
 #define MMU_UPDATE_PREEMPTED          (~(~0U>>1))
 
-static void free_l2_table(struct page_info *page);
-static void free_l1_table(struct page_info *page);
-
-static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t, unsigned long,
-                        unsigned long type);
-static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t, unsigned long gl1mfn);
-
 /* Used to defer flushing of memory structures. */
 struct percpu_mm_info {
 #define DOP_FLUSH_TLB      (1<<0) /* Flush the local TLB.                    */
@@ -164,6 +157,10 @@ struct page_info *frame_table;
 unsigned long max_page;
 unsigned long total_pages;
 
+#ifdef CONFIG_COMPAT
+l2_pgentry_t *compat_idle_pg_table_l2 = NULL;
+#endif
+
 void __init init_frametable(void)
 {
     unsigned long nr_pages, page_step, i, mfn;
@@ -852,13 +849,18 @@ static int alloc_l1_table(struct page_in
     return 0;
 }
 
-#ifdef CONFIG_X86_PAE
-static int create_pae_xen_mappings(l3_pgentry_t *pl3e)
+#if defined(CONFIG_X86_PAE) || defined(CONFIG_COMPAT)
+static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
 {
     struct page_info *page;
     l2_pgentry_t    *pl2e;
     l3_pgentry_t     l3e3;
+#ifndef CONFIG_COMPAT
     int              i;
+#else
+    if ( !IS_COMPAT(d) )
+        return 1;
+#endif
 
     pl3e = (l3_pgentry_t *)((unsigned long)pl3e & PAGE_MASK);
 
@@ -893,6 +895,7 @@ static int create_pae_xen_mappings(l3_pg
 
     /* Xen private mappings. */
     pl2e = map_domain_page(l3e_get_pfn(l3e3));
+#ifndef CONFIG_COMPAT
     memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
            &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
            L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
@@ -906,11 +909,18 @@ static int create_pae_xen_mappings(l3_pg
             (l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) ?
             l2e_from_pfn(l3e_get_pfn(pl3e[i]), __PAGE_HYPERVISOR) :
         l2e_empty();
+#else
+    memcpy(&pl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT],
+           &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
+           COMPAT_L2_PAGETABLE_XEN_SLOTS * sizeof(*pl2e));
+#endif
     unmap_domain_page(pl2e);
 
     return 1;
 }
+#endif
 
+#ifdef CONFIG_X86_PAE
 /* Flush a pgdir update into low-memory caches. */
 static void pae_flush_pgd(
     unsigned long mfn, unsigned int idx, l3_pgentry_t nl3e)
@@ -947,10 +957,12 @@ static void pae_flush_pgd(
 }
 
 #elif CONFIG_X86_64
-# define create_pae_xen_mappings(pl3e) (1)
+# ifndef CONFIG_COMPAT
+#  define create_pae_xen_mappings(d, pl3e) (1)
+# endif
 # define pae_flush_pgd(mfn, idx, nl3e) ((void)0)
 #else
-# define create_pae_xen_mappings(pl3e) (1)
+# define create_pae_xen_mappings(d, pl3e) (1)
 #endif
 
 static int alloc_l2_table(struct page_info *page, unsigned long type)
@@ -966,7 +978,7 @@ static int alloc_l2_table(struct page_in
 
     for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
     {
-        if ( is_guest_l2_slot(type, i) &&
+        if ( is_guest_l2_slot(d, type, i) &&
              unlikely(!get_page_from_l2e(pl2e[i], pfn, d)) )
             goto fail;
         
@@ -993,7 +1005,7 @@ static int alloc_l2_table(struct page_in
  fail:
     MEM_LOG("Failure in alloc_l2_table: entry %d", i);
     while ( i-- > 0 )
-        if ( is_guest_l2_slot(type, i) )
+        if ( is_guest_l2_slot(d, type, i) )
             put_page_from_l2e(pl2e[i], pfn);
 
     unmap_domain_page(pl2e);
@@ -1029,8 +1041,8 @@ static int alloc_l3_table(struct page_in
     pl3e = map_domain_page(pfn);
     for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
     {
-#ifdef CONFIG_X86_PAE
-        if ( i == 3 )
+#if defined(CONFIG_X86_PAE) || defined(CONFIG_COMPAT)
+        if ( (CONFIG_PAGING_LEVELS < 4 || IS_COMPAT(d)) && i == 3 )
         {
             if ( !(l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) ||
                  (l3e_get_flags(pl3e[i]) & L3_DISALLOW_MASK) ||
@@ -1049,7 +1061,7 @@ static int alloc_l3_table(struct page_in
         adjust_guest_l3e(pl3e[i]);
     }
 
-    if ( !create_pae_xen_mappings(pl3e) )
+    if ( !create_pae_xen_mappings(d, pl3e) )
         goto fail;
 
     unmap_domain_page(pl3e);
@@ -1132,6 +1144,9 @@ static void free_l1_table(struct page_in
 
 static void free_l2_table(struct page_info *page)
 {
+#ifdef CONFIG_COMPAT
+    struct domain *d = page_get_owner(page);
+#endif
     unsigned long pfn = page_to_mfn(page);
     l2_pgentry_t *pl2e;
     int i;
@@ -1139,7 +1154,7 @@ static void free_l2_table(struct page_in
     pl2e = map_domain_page(pfn);
 
     for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
-        if ( is_guest_l2_slot(page->u.inuse.type_info, i) )
+        if ( is_guest_l2_slot(d, page->u.inuse.type_info, i) )
             put_page_from_l2e(pl2e[i], pfn);
 
     unmap_domain_page(pl2e);
@@ -1312,11 +1327,12 @@ static int mod_l1_entry(l1_pgentry_t *pl
 static int mod_l2_entry(l2_pgentry_t *pl2e, 
                         l2_pgentry_t nl2e, 
                         unsigned long pfn,
-                        unsigned long type)
+                        unsigned long type,
+                        struct domain *d)
 {
     l2_pgentry_t ol2e;
 
-    if ( unlikely(!is_guest_l2_slot(type,pgentry_ptr_to_slot(pl2e))) )
+    if ( unlikely(!is_guest_l2_slot(d, type, pgentry_ptr_to_slot(pl2e))) )
     {
         MEM_LOG("Illegal L2 update attempt in Xen-private area %p", pl2e);
         return 0;
@@ -1363,7 +1379,8 @@ static int mod_l2_entry(l2_pgentry_t *pl
 /* Update the L3 entry at pl3e to new value nl3e. pl3e is within frame pfn. */
 static int mod_l3_entry(l3_pgentry_t *pl3e, 
                         l3_pgentry_t nl3e, 
-                        unsigned long pfn)
+                        unsigned long pfn,
+                        struct domain *d)
 {
     l3_pgentry_t ol3e;
     int okay;
@@ -1415,7 +1432,7 @@ static int mod_l3_entry(l3_pgentry_t *pl
         return 0;
     }
 
-    okay = create_pae_xen_mappings(pl3e);
+    okay = create_pae_xen_mappings(d, pl3e);
     BUG_ON(!okay);
 
     pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);
@@ -2278,24 +2295,25 @@ int do_mmu_update(
                 {
                     l2_pgentry_t l2e = l2e_from_intpte(req.val);
                     okay = mod_l2_entry(
-                        (l2_pgentry_t *)va, l2e, mfn, type_info);
+                        (l2_pgentry_t *)va, l2e, mfn, type_info, FOREIGNDOM);
                 }
                 break;
 #if CONFIG_PAGING_LEVELS >= 3
                 case PGT_l3_page_table:
                 {
                     l3_pgentry_t l3e = l3e_from_intpte(req.val);
-                    okay = mod_l3_entry(va, l3e, mfn);
+                    okay = mod_l3_entry(va, l3e, mfn, FOREIGNDOM);
                 }
                 break;
 #endif
 #if CONFIG_PAGING_LEVELS >= 4
                 case PGT_l4_page_table:
-                {
-                    l4_pgentry_t l4e = l4e_from_intpte(req.val);
-                    okay = mod_l4_entry(va, l4e, mfn);
-                }
-                break;
+                    if ( !IS_COMPAT(FOREIGNDOM) )
+                    {
+                        l4_pgentry_t l4e = l4e_from_intpte(req.val);
+                        okay = mod_l4_entry(va, l4e, mfn);
+                    }
+                    break;
 #endif
                 }
 
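
Threading the domain through mod_l2_entry() and is_guest_l2_slot() matters because a compat guest's PGT_pae_xen_l2 table carries Xen mappings in its top slots, just as on native PAE. A sketch of the check, mirroring the x86_64 is_guest_l2_slot() definition at the end of this patch:

    /* A guest may write an L2 slot unless it is a compat guest writing
     * into the Xen-reserved tail of its special (PGT_pae_xen_l2) L2. */
    static int guest_owns_l2_slot(const struct domain *d,
                                  unsigned long type, unsigned int slot)
    {
        return !IS_COMPAT(d) ||
               !(type & PGT_pae_xen_l2) ||
               slot < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT;
    }
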
Index: 2006-10-04/xen/arch/x86/x86_64/mm.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/x86_64/mm.c    2006-10-04 15:03:07.000000000 +0200
+++ 2006-10-04/xen/arch/x86/x86_64/mm.c 2006-10-04 15:16:05.000000000 +0200
@@ -118,6 +118,37 @@ void __init paging_init(void)
             pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
     }
 
+#ifdef CONFIG_COMPAT
+    /* Create user-accessible L2 directory to map the MPT for compatibility guests. */
+    BUILD_BUG_ON(l4_table_offset(RDWR_MPT_VIRT_START) !=
+                 l4_table_offset(HIRO_COMPAT_MPT_VIRT_START));
+    l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(HIRO_COMPAT_MPT_VIRT_START)]);
+    pg = alloc_domheap_page(NULL);
+    compat_idle_pg_table_l2 = l2_ro_mpt = clear_page(page_to_virt(pg));
+    l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)] =
+        l3e_from_page(pg, __PAGE_HYPERVISOR);
+    l2_ro_mpt += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
+    /*
+     * Allocate and map the compatibility mode machine-to-phys table.
+     */
+    mpt_size = (mpt_size >> 1) + (1UL << (L2_PAGETABLE_SHIFT - 1));
+    if ( mpt_size > RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START )
+        mpt_size = RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START;
+    for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
+    {
+        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
+            panic("Not enough memory for compat m2p table\n");
+        map_pages_to_xen(
+            RDWR_COMPAT_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), page_to_mfn(pg),
+            1UL << PAGETABLE_ORDER,
+            PAGE_HYPERVISOR);
+        memset((void *)(RDWR_COMPAT_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0x55,
+               1UL << L2_PAGETABLE_SHIFT);
+        /* NB. Cannot be GLOBAL as the pt entries get copied into per-VM space. */
+        *l2_ro_mpt++ = l2e_from_page(pg, _PAGE_PSE|_PAGE_PRESENT);
+    }
+#endif
+
     /* Set up linear page table mapping. */
     idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
         l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
@@ -175,6 +206,27 @@ void subarch_init_memory(void)
             share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
+#ifdef CONFIG_COMPAT
+    for ( v  = RDWR_COMPAT_MPT_VIRT_START;
+          v != RDWR_COMPAT_MPT_VIRT_END;
+          v += 1 << L2_PAGETABLE_SHIFT )
+    {
+        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
+            l3_table_offset(v)];
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+            continue;
+        l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
+        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
+            continue;
+        m2p_start_mfn = l2e_get_pfn(l2e);
+
+        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+        {
+            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
+            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
+        }
+    }
+#endif
 }
 
 long subarch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
@@ -182,7 +234,8 @@ long subarch_memory_op(int op, XEN_GUEST
     struct xen_machphys_mfn_list xmml;
     l3_pgentry_t l3e;
     l2_pgentry_t l2e;
-    unsigned long mfn, v;
+    unsigned long v;
+    xen_pfn_t mfn;
     unsigned int i;
     long rc = 0;
 
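
Since compat M2P entries are 4 bytes rather than 8, paging_init() above sizes the compat table at half the native one, padded up to a whole 2MB superpage and capped at the 1GB RDWR_COMPAT_MPT window. Roughly, as a sketch (macro names as in the patch; L2_PAGETABLE_SHIFT is 21 on x86-64):

    /* Sketch of the sizing arithmetic; the result is consumed in
     * 2MB (superpage) steps by the allocation loop above. */
    static unsigned long compat_mpt_size(unsigned long native_mpt_size)
    {
        unsigned long sz = (native_mpt_size >> 1)  /* half: 4-byte entries */
                         + (1UL << (L2_PAGETABLE_SHIFT - 1)); /* round up */
        if ( sz > RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START )
            sz = RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START;
        return sz;
    }
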
Index: 2006-10-04/xen/include/asm-x86/config.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/config.h        2006-10-04 15:03:07.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/config.h     2006-10-04 15:16:05.000000000 +0200
@@ -135,7 +135,11 @@ static inline void FORCE_CRASH(void) 
  *    Page-frame information array.
  *  0xffff828800000000 - 0xffff828bffffffff [16GB,  2^34 bytes, PML4:261]
  *    ioremap()/fixmap area.
- *  0xffff828c00000000 - 0xffff82ffffffffff [464GB,             PML4:261]
+ *  0xffff828c00000000 - 0xffff828c3fffffff [1GB,   2^30 bytes, PML4:261]
+ *    Compatibility machine-to-phys translation table.
+ *  0xffff828c40000000 - 0xffff828c7fffffff [1GB,   2^30 bytes, PML4:261]
+ *    High read-only compatibility machine-to-phys translation table.
+ *  0xffff828c80000000 - 0xffff82ffffffffff [462GB,             PML4:261]
  *    Reserved for future use.
  *  0xffff830000000000 - 0xffff83ffffffffff [1TB,   2^40 bytes, PML4:262-263]
  *    1:1 direct mapping of all physical memory. Xen and its heap live here.
@@ -184,6 +188,12 @@ static inline void FORCE_CRASH(void) 
 /* Slot 261: ioremap()/fixmap area (16GB). */
 #define IOREMAP_VIRT_START      (FRAMETABLE_VIRT_END)
 #define IOREMAP_VIRT_END        (IOREMAP_VIRT_START + (16UL<<30))
+/* Slot 261: compatibility machine-to-phys conversion table (1GB). */
+#define RDWR_COMPAT_MPT_VIRT_START IOREMAP_VIRT_END
+#define RDWR_COMPAT_MPT_VIRT_END   (RDWR_COMPAT_MPT_VIRT_START + (1UL << 30))
+/* Slot 261: high read-only compatibility machine-to-phys conversion table (1GB). */
+#define HIRO_COMPAT_MPT_VIRT_START RDWR_COMPAT_MPT_VIRT_END
+#define HIRO_COMPAT_MPT_VIRT_END   (HIRO_COMPAT_MPT_VIRT_START + (1UL << 30))
 /* Slot 262-263: A direct 1:1 mapping of all of physical memory. */
 #define DIRECTMAP_VIRT_START    (PML4_ADDR(262))
 #define DIRECTMAP_VIRT_END      (DIRECTMAP_VIRT_START + PML4_ENTRY_BYTES*2)
@@ -196,6 +206,16 @@ static inline void FORCE_CRASH(void) 
 #define MACH2PHYS_COMPAT_VIRT_END      mk_unsigned_long(__MACH2PHYS_COMPAT_VIRT_END)
 #define MACH2PHYS_COMPAT_NR_ENTRIES    ((MACH2PHYS_COMPAT_VIRT_END-MACH2PHYS_COMPAT_VIRT_START)>>2)
 
+#define COMPAT_RO_MPT_VIRT_START       MACH2PHYS_COMPAT_VIRT_START
+#define COMPAT_RO_MPT_VIRT_END         MACH2PHYS_COMPAT_VIRT_END
+
+#define COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT \
+    l2_table_offset(HYPERVISOR_COMPAT_VIRT_START)
+#define COMPAT_L2_PAGETABLE_LAST_XEN_SLOT  \
+    l2_table_offset(~0U)
+#define COMPAT_L2_PAGETABLE_XEN_SLOTS \
+    (COMPAT_L2_PAGETABLE_LAST_XEN_SLOT - COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT + 1)
+
 #define PGT_base_page_table     PGT_l4_page_table
 
 #define __HYPERVISOR_CS64 0xe008
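
For scale: the 1GB compat M2P window holds 4-byte entries. A back-of-the-envelope sketch of what that covers:

    /* Capacity of a 1GB compat M2P window, assuming 4KB frames. */
    #define COMPAT_MPT_ENTRIES ((1UL << 30) / sizeof(unsigned int)) /* 2^28 */
    /* 2^28 frames x 4KB/frame = 1TB of machine memory -- the same span
     * as the 1:1 direct map (slots 262-263) described above. */
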
Index: 2006-10-04/xen/include/asm-x86/mm.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/mm.h    2006-10-04 15:03:07.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/mm.h 2006-10-04 15:16:05.000000000 +0200
@@ -293,7 +293,15 @@ int check_descriptor(const struct domain
 #define INVALID_M2P_ENTRY        (~0UL)
 #define VALID_M2P(_e)            (!((_e) & (1UL<<(BITS_PER_LONG-1))))
 
+#ifdef CONFIG_COMPAT
+#define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
+#define set_gpfn_from_mfn(mfn, pfn) \
+    ((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
+            (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(pfn))), \
+     machine_to_phys_mapping[(mfn)] = (pfn))
+#else
 #define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
+#endif
 #define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
 
 
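
The replacement set_gpfn_from_mfn() keeps both M2P tables coherent. Written out as a function for readability (same semantics as the macro above, modulo the usual macro-argument caveats):

    static inline void set_gpfn_from_mfn_sketch(unsigned long mfn,
                                                unsigned long pfn)
    {
        unsigned long compat_entries =
            (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4;

        /* Only MFNs the 1GB compat window can index get a compat entry. */
        if ( mfn < compat_entries )
            compat_machine_to_phys_mapping[mfn] = (unsigned int)pfn;
        machine_to_phys_mapping[mfn] = pfn;  /* native table: always */
    }
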
Index: 2006-10-04/xen/include/asm-x86/page.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/page.h  2006-09-11 09:06:11.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/page.h       2006-10-04 15:16:05.000000000 +0200
@@ -173,6 +173,7 @@ typedef struct { u32 pfn; } pagetable_t;
 typedef struct { u64 pfn; } pagetable_t;
 #endif
 #define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
+#define pagetable_get_page(x)   mfn_to_page((x).pfn)
 #define pagetable_get_pfn(x)    ((x).pfn)
 #define pagetable_is_null(x)    ((x).pfn == 0)
 #define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
@@ -254,6 +255,9 @@ extern l2_pgentry_t   idle_pg_table_l2[R
 #else
 extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
 extern l2_pgentry_t   idle_pg_table_l2[ROOT_PAGETABLE_ENTRIES];
+#ifdef CONFIG_COMPAT
+extern l2_pgentry_t  *compat_idle_pg_table_l2;
+#endif
 #endif
 void paging_init(void);
 void setup_idle_pagetable(void);
Index: 2006-10-04/xen/include/asm-x86/x86_32/page-2level.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/x86_32/page-2level.h    2006-09-11 09:06:11.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/x86_32/page-2level.h 2006-10-04 15:16:05.000000000 +0200
@@ -38,7 +38,7 @@ typedef l2_pgentry_t root_pgentry_t;
 
 /* misc */
 #define is_guest_l1_slot(_s)    (1)
-#define is_guest_l2_slot(_t,_s) ((_s) < L2_PAGETABLE_FIRST_XEN_SLOT)
+#define is_guest_l2_slot(_d, _t,_s) ((_s) < L2_PAGETABLE_FIRST_XEN_SLOT)
 
 /*
  * PTE pfn and flags:
Index: 2006-10-04/xen/include/asm-x86/x86_32/page-3level.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/x86_32/page-3level.h    2006-09-20 13:19:25.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/x86_32/page-3level.h 2006-10-04 15:16:05.000000000 +0200
@@ -48,7 +48,7 @@ typedef l3_pgentry_t root_pgentry_t;
 
 /* misc */
 #define is_guest_l1_slot(s)    (1)
-#define is_guest_l2_slot(t,s)                                              \
+#define is_guest_l2_slot(d,t,s)                                            \
     ( !((t) & PGT_pae_xen_l2) ||                                           \
       ((s) < (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES - 1))) )
 #define is_guest_l3_slot(s)    (1)
Index: 2006-10-04/xen/include/asm-x86/x86_64/page.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/x86_64/page.h   2006-09-20 13:19:25.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/x86_64/page.h        2006-10-04 15:16:05.000000000 +0200
@@ -48,7 +48,10 @@ typedef l4_pgentry_t root_pgentry_t;
 #define l4_linear_offset(_a) (((_a) & VADDR_MASK) >> L4_PAGETABLE_SHIFT)
 
 #define is_guest_l1_slot(_s) (1)
-#define is_guest_l2_slot(_t, _s) (1)
+#define is_guest_l2_slot(_d, _t, _s)                   \
+    ( !IS_COMPAT(_d) ||                                \
+      !((_t) & PGT_pae_xen_l2) ||                      \
+      ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT) )
 #define is_guest_l3_slot(_s) (1)
 #define is_guest_l4_slot(_s)                   \
     (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \

