Re: [Xen-devel] crash on boot with 4.6.1 on fedora 24
On 11/05/16 11:16, David Vrabel wrote:
>
> Why don't we get the RW bits correct when making the pteval when we
> already have the pfn, instead of trying to fix it up afterwards?

Kevin, can you try this patch?

David

8<-----------------
x86/xen: avoid m2p lookup when setting early page table entries

When page table entries are set using xen_set_pte_init() during early
boot there is no page fault handler that could handle a fault when
performing an M2P lookup.

In a 64-bit guest (usually dom0), early_ioremap() would fault in
xen_set_pte_init() because the M2P lookup faults: the MFN is in MMIO
space and not mapped in the M2P.  This lookup is done to see if the
PFN is in the range used for the initial page table pages, so that
the PTE may be set read-only.

The M2P lookup can be avoided by moving the check (and the clearing
of RW) earlier, when the PFN is still available.

[ Not entirely happy with this as the 32/64-bit paths diverge even
  more.  Is there some way to unify them instead? ]

Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 arch/x86/xen/mmu.c | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 478a2de..897fad4 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1562,7 +1562,7 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 	return pte;
 }
 #else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
+static pteval_t __init mask_rw_pte(pteval_t pte)
 {
 	unsigned long pfn;
 
@@ -1577,10 +1577,10 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 	 * page tables for mapping the p2m list, too, and page tables MUST be
 	 * mapped read-only.
 	 */
-	pfn = pte_pfn(pte);
+	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
 	if (pfn >= xen_start_info->first_p2m_pfn &&
 	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-		pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
+		pte &= ~_PAGE_RW;
 
 	return pte;
 }
@@ -1600,13 +1600,26 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
  * so always write the PTE directly and rely on Xen trapping and
  * emulating any updates as necessary.
  */
+__visible __init pte_t xen_make_pte_init(pteval_t pte)
+{
+#ifdef CONFIG_X86_64
+	pte = mask_rw_pte(pte);
+#endif
+	pte = pte_pfn_to_mfn(pte);
+
+	if ((pte & PTE_PFN_MASK) >> PAGE_SHIFT == INVALID_P2M_ENTRY)
+		pte = 0;
+
+	return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
+
 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
 {
+#ifdef CONFIG_X86_32
 	if (pte_mfn(pte) != INVALID_P2M_ENTRY)
 		pte = mask_rw_pte(ptep, pte);
-	else
-		pte = __pte_ma(0);
-
+#endif
 	native_set_pte(ptep, pte);
 }
 
@@ -2407,6 +2420,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2469,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 
 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
+	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE
-- 
2.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
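[Editor's note: for anyone following the logic outside the kernel tree,
here is a minimal standalone sketch (not part of the patch) of the
RW-masking step that xen_make_pte_init() performs before the PFN-to-MFN
translation. The constants and the first_p2m_pfn/nr_p2m_frames values
below are simplified stand-ins for the kernel's PTE_PFN_MASK,
PAGE_SHIFT, _PAGE_RW and xen_start_info fields; it only illustrates why
no M2P lookup is needed once the range check runs while the pteval
still holds a PFN.]

/* Standalone sketch of the RW-masking logic described in the patch.
 * All constants are simplified stand-ins for the kernel definitions
 * of the same names, chosen only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT    12
#define PTE_PFN_MASK  0x000ffffffffff000ULL  /* bits 12-51 hold the frame number */
#define _PAGE_RW      0x2ULL

/* Hypothetical stand-ins for xen_start_info->first_p2m_pfn and
 * xen_start_info->nr_p2m_frames. */
static const uint64_t first_p2m_pfn = 0x1000;
static const uint64_t nr_p2m_frames = 0x100;

/* Clear RW while the pteval still holds a PFN (i.e. before it is
 * translated to an MFN), so no M2P lookup is ever needed. */
static uint64_t mask_rw_pteval(uint64_t pte)
{
	uint64_t pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;

	if (pfn >= first_p2m_pfn && pfn < first_p2m_pfn + nr_p2m_frames)
		pte &= ~_PAGE_RW;
	return pte;
}

int main(void)
{
	/* A present, writable PTE whose PFN lies in the p2m frame range. */
	uint64_t pte = (0x1010ULL << PAGE_SHIFT) | _PAGE_RW | 0x1;

	printf("before: %#llx  after: %#llx\n",
	       (unsigned long long)pte,
	       (unsigned long long)mask_rw_pteval(pte));
	return 0;
}

The reordering in the patch buys exactly what this sketch shows: the
range test needs only the PFN, so doing it before pte_pfn_to_mfn()
means the early boot path never has to touch the M2P array at all.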