
[Xen-devel] [BAND-AID PATCH] x86: partially undo (disable) d639e6a05a



As the osstest results suggest that commit d639e6a05a ("x86: allow
64-bit PV guest kernels to suppress user mode exposure of M2P") is
responsible for the observed migration failures, comment out the meat
of it without fully reverting it, until it is understood what is
causing the issue.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
I'm intending to push this unless the bisector manages to disprove the
suspicion about the aforementioned commit by tonight.
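
For reference, a minimal, illustrative guest-side sketch (not part of
this patch) of how a 64-bit PV kernel would request the strict M2P
handling whose hypervisor-side plumbing is being disabled below. The
Linux-style wrapper name and fallback message are assumptions; the
VMASST_* constants come from Xen's public interface headers:

    /*
     * Illustrative sketch only: opt a 64-bit PV guest in to strict M2P
     * mode, asking Xen not to expose the read-only M2P to guest user
     * mode.  Assumes a Linux-style hypercall wrapper.
     */
    #include <linux/printk.h>
    #include <xen/interface/xen.h>   /* VMASST_CMD_enable */
    #include <asm/xen/hypercall.h>   /* HYPERVISOR_vm_assist() */

    #ifndef VMASST_TYPE_m2p_strict
    #define VMASST_TYPE_m2p_strict 32 /* from xen/include/public/xen.h */
    #endif

    static void __init xen_enable_m2p_strict(void)
    {
        /* Older hypervisors without the assist will return an error. */
        if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                 VMASST_TYPE_m2p_strict))
            pr_warn("m2p_strict assist not available\n");
    }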

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1396,25 +1396,31 @@ void init_guest_l4_table(l4_pgentry_t l4
         l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
+#if 0 /* FIXME */
     if ( zap_ro_mpt || is_pv_32on64_domain(d) || paging_mode_refcounts(d) )
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+#endif
 }
 
 void fill_ro_mpt(unsigned long mfn)
 {
+#if 0 /* FIXME */
     l4_pgentry_t *l4tab = map_domain_page(mfn);
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
         idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
     unmap_domain_page(l4tab);
+#endif
 }
 
 void zap_ro_mpt(unsigned long mfn)
 {
+#if 0 /* FIXME */
     l4_pgentry_t *l4tab = map_domain_page(mfn);
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
     unmap_domain_page(l4tab);
+#endif
 }
 
 static int alloc_l4_table(struct page_info *page)
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1435,8 +1435,10 @@ void sh_install_xen_entries_in_l4(struct
         shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
                             __PAGE_HYPERVISOR);
 
+#if 0 /* FIXME */
     if ( !VM_ASSIST(d, m2p_strict) )
         sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
+#endif
 
     /* Shadow linear mapping for 4-level shadows.  N.B. for 3-level
      * shadows on 64-bit xen, this linear mapping is later replaced by the
@@ -3978,6 +3980,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
         /* PAGING_LEVELS==4 implies 64-bit, which means that
          * map_domain_page_global can't fail */
         BUG_ON(v->arch.paging.shadow.guest_vtable == NULL);
+#if 0 /* FIXME */
         if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) )
         {
             shadow_l4e_t *sl4e = v->arch.paging.shadow.guest_vtable;
@@ -3991,6 +3994,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
                 sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
                     shadow_l4e_empty();
         }
+#endif
     }
     else
         v->arch.paging.shadow.guest_vtable = __linear_l4_table;



Attachment: x86-m2p-strict-partial-undo.patch
Description: Text document
