
[Xen-devel] [PATCH] x86/xen: enable early use of set_fixmap in 32-bit Xen PV guest



Commit 7b25b9cb0dad83 ("x86/xen/time: Initialize pv xen time in
init_hypervisor_platform()") moved the mapping of the shared info area
before pagetable_init(). This breaks booting as a 32-bit PV guest,
because set_fixmap() cannot be used that early on 32-bit.

This can be worked around by populating the needed PMD earlier on
32-bit kernels.
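
Concretely (this just restates the enlighten_pv.c hunk below), the
fixmap slot used for the shared info area gets its backing page table
populated before set_fixmap() is called:

	/* Make sure a PTE backing FIX_PARAVIRT_BOOTMAP exists before
	 * set_fixmap() is used this early on 32-bit PV. */
	populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
	set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);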

Instead of reimplementing populate_extra_pte() to use extend_brk() for
allocating new page tables, extend alloc_low_pages() to fall back to
extend_brk() in case the early page table pool is not yet available.
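
For reference, the resulting allocation order in alloc_low_pages() when
the static page table pool cannot be used is (condensed from the
init.c hunk below):

	unsigned long ret = 0;

	/* 1. Try memblock within the already mapped range. */
	if (min_pfn_mapped < max_pfn_mapped)
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					     max_pfn_mapped << PAGE_SHIFT,
					     PAGE_SIZE * num, PAGE_SIZE);
	if (ret)
		memblock_reserve(ret, PAGE_SIZE * num);
	/* 2. Fall back to extend_brk() if brk page tables may be used. */
	else if (can_use_brk_pgt)
		ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));
	/* 3. Give up only if both failed. */
	if (!ret)
		panic("alloc_low_pages: can not alloc memory");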

Fixes: 7b25b9cb0dad83 ("x86/xen/time: Initialize pv xen time in init_hypervisor_platform()")
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 arch/x86/mm/init.c          | 17 ++++++++++++-----
 arch/x86/xen/enlighten_pv.c |  2 ++
 arch/x86/xen/mmu_pv.c       |  2 ++
 3 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index acfab322fbe0..5c32a7665492 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -99,15 +99,22 @@ __ref void *alloc_low_pages(unsigned int num)
        }
 
        if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
-               unsigned long ret;
-               if (min_pfn_mapped >= max_pfn_mapped)
-                       panic("alloc_low_pages: ran out of memory");
-               ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
+               unsigned long ret = 0;
+
+               if (min_pfn_mapped < max_pfn_mapped) {
+                       ret = memblock_find_in_range(
+                                       min_pfn_mapped << PAGE_SHIFT,
                                        max_pfn_mapped << PAGE_SHIFT,
                                        PAGE_SIZE * num , PAGE_SIZE);
+               }
+               if (ret)
+                       memblock_reserve(ret, PAGE_SIZE * num);
+               else if (can_use_brk_pgt)
+                       ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));
+
                if (!ret)
                        panic("alloc_low_pages: can not alloc memory");
-               memblock_reserve(ret, PAGE_SIZE * num);
+
                pfn = ret >> PAGE_SHIFT;
        } else {
                pfn = pgt_buf_end;
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index ee3b00c7acda..d7b2022ee301 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -122,6 +122,8 @@ static void __init xen_banner(void)
 
 static void __init xen_pv_init_platform(void)
 {
+       populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
+
        set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
        HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
 
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 52206ad81e4b..9e7012858420 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2171,6 +2171,8 @@ void __init xen_relocate_p2m(void)
 #else  /* !CONFIG_X86_64 */
 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
+RESERVE_BRK(fixup_kernel_pmd, PAGE_SIZE);
+RESERVE_BRK(fixup_kernel_pte, PAGE_SIZE);
 
 static void __init xen_write_cr3_init(unsigned long cr3)
 {
-- 
2.13.7

