[Xen-devel] [PATCH v2 3/3] x86/p2m-pt: pass level instead of page type to p2m_next_level()
This in turn calls for p2m_alloc_ptp() to also be passed the numeric
level.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: New.
---
The question is whether passing the level to p2m_alloc_ptp() is really
all that useful: p2m-ept.c's only use passes zero anyway, and p2m.c's
uniform passing of 4 doesn't necessarily match reality afaict.
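[Editorial note: the BUILD_BUG_ON()s the patch adds rely on the
PGT_l<n>_page_table type values being consecutive multiples of
PGT_l1_page_table in the type field of type_info. A minimal standalone
sketch of that identity follows; the PG_SHIFT and PGT_* definitions
here are stand-ins mirroring Xen's PG_mask() encoding on x86-64, not
the real definitions:

#include <stdio.h>

#define PG_SHIFT           60 /* stand-in: BITS_PER_LONG - 4 on x86-64 */
#define PGT_l1_page_table  (1UL << PG_SHIFT)
#define PGT_l2_page_table  (2UL << PG_SHIFT)
#define PGT_l3_page_table  (3UL << PG_SHIFT)
#define PGT_l4_page_table  (4UL << PG_SHIFT)

/* The same compile-time checks the patch adds to p2m_alloc_ptp(). */
_Static_assert(PGT_l1_page_table * 2 == PGT_l2_page_table, "l2");
_Static_assert(PGT_l1_page_table * 3 == PGT_l3_page_table, "l3");
_Static_assert(PGT_l1_page_table * 4 == PGT_l4_page_table, "l4");

int main(void)
{
    /* Derive a page-table type from a numeric level, as the patched
     * p2m_alloc_ptp() does when composing type_info. */
    for ( unsigned int level = 1; level <= 4; level++ )
        printf("level %u -> type %#lx\n", level,
               PGT_l1_page_table * level);
    return 0;
}
]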
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -569,7 +569,7 @@ int p2m_set_entry(struct p2m_domain *p2m
     return rc;
 }
 
-mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level)
 {
     struct page_info *pg;
 
@@ -581,7 +581,10 @@ mfn_t p2m_alloc_ptp(struct p2m_domain *p
         return INVALID_MFN;
 
     page_list_add_tail(pg, &p2m->pages);
-    pg->u.inuse.type_info = type | 1 | PGT_validated;
+    BUILD_BUG_ON(PGT_l1_page_table * 2 != PGT_l2_page_table);
+    BUILD_BUG_ON(PGT_l1_page_table * 3 != PGT_l3_page_table);
+    BUILD_BUG_ON(PGT_l1_page_table * 4 != PGT_l4_page_table);
+    pg->u.inuse.type_info = (PGT_l1_page_table * level) | 1 | PGT_validated;
 
     return page_to_mfn(pg);
 }
@@ -632,7 +635,7 @@ int p2m_alloc_table(struct p2m_domain *p
 
     P2M_PRINTK("allocating p2m table\n");
 
-    top_mfn = p2m_alloc_ptp(p2m, PGT_l4_page_table);
+    top_mfn = p2m_alloc_ptp(p2m, 4);
     if ( mfn_eq(top_mfn, INVALID_MFN) )
     {
         p2m_unlock(p2m);
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -71,12 +71,6 @@
 #define needs_recalc(level, ent) _needs_recalc(level##e_get_flags(ent))
 #define valid_recalc(level, ent) (!(level##e_get_flags(ent) & _PAGE_ACCESSED))
 
-static const unsigned long pgt[] = {
-    PGT_l1_page_table,
-    PGT_l2_page_table,
-    PGT_l3_page_table
-};
-
 static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                        p2m_type_t t,
                                        mfn_t mfn,
@@ -189,7 +183,7 @@ static void p2m_add_iommu_flags(l1_pgent
 static int
 p2m_next_level(struct p2m_domain *p2m, void **table,
                unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
-               u32 max, unsigned long type, bool_t unmap)
+               u32 max, unsigned int level, bool_t unmap)
 {
     l1_pgentry_t *p2m_entry, new_entry;
     void *next;
@@ -204,30 +198,15 @@ p2m_next_level(struct p2m_domain *p2m, v
     /* PoD/paging: Not present doesn't imply empty. */
     if ( !flags )
     {
-        mfn_t mfn = p2m_alloc_ptp(p2m, type);
+        mfn_t mfn = p2m_alloc_ptp(p2m, level);
 
         if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
        new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
 
-        switch ( type ) {
-        case PGT_l3_page_table:
-            p2m_add_iommu_flags(&new_entry, 3, IOMMUF_readable|IOMMUF_writable);
-            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 4);
-            break;
-        case PGT_l2_page_table:
-            p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
-            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
-            break;
-        case PGT_l1_page_table:
-            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
-            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
-            break;
-        default:
-            BUG();
-            break;
-        }
+        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
     else if ( flags & _PAGE_PSE )
     {
@@ -235,15 +214,14 @@ p2m_next_level(struct p2m_domain *p2m, v
         unsigned long pfn = l1e_get_pfn(*p2m_entry);
         mfn_t mfn;
         l1_pgentry_t *l1_entry;
-        unsigned int i, level;
+        unsigned int i;
 
-        switch ( type )
+        switch ( level )
         {
-        case PGT_l2_page_table:
-            level = 2;
+        case 2:
             break;
 
-        case PGT_l1_page_table:
+        case 1:
             /*
              * New splintered mappings inherit the flags of the old superpage,
              * with a little reorganisation for the _PAGE_PSE_PAT bit.
@@ -252,8 +230,6 @@ p2m_next_level(struct p2m_domain *p2m, v
                 pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
             else
                 flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
-
-            level = 1;
             break;
 
         default:
@@ -261,7 +237,7 @@ p2m_next_level(struct p2m_domain *p2m, v
             return -EINVAL;
         }
 
-        mfn = p2m_alloc_ptp(p2m, type);
+        mfn = p2m_alloc_ptp(p2m, level);
         if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
@@ -325,7 +301,7 @@ static int p2m_pt_set_recalc_range(struc
         err = p2m_next_level(p2m, &table, &gfn_remainder, first_gfn,
                              i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
-                             pgt[i - 1], 1);
+                             i, 1);
         if ( err )
             goto out;
     }
@@ -393,7 +369,7 @@ static int do_recalc(struct p2m_domain *
         err = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                              level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
-                             pgt[level - 1], 0);
+                             level, 0);
         if ( err )
             goto out;
 
@@ -557,7 +533,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
-                        L4_PAGETABLE_ENTRIES, PGT_l3_page_table, 1);
+                        L4_PAGETABLE_ENTRIES, 3, 1);
     if ( rc )
         goto out;
 
@@ -605,7 +581,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     {
         rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                             L3_PAGETABLE_SHIFT - PAGE_SHIFT,
-                            L3_PAGETABLE_ENTRIES, PGT_l2_page_table, 1);
+                            L3_PAGETABLE_ENTRIES, 2, 1);
         if ( rc )
             goto out;
     }
@@ -616,7 +592,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
         rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                             L2_PAGETABLE_SHIFT - PAGE_SHIFT,
-                            L2_PAGETABLE_ENTRIES, PGT_l1_page_table, 1);
+                            L2_PAGETABLE_ENTRIES, 1, 1);
         if ( rc )
             goto out;
 
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -684,7 +684,7 @@ void p2m_mem_paging_resume(struct domain
  * Internal functions, only called by other p2m code
  */
 
-mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level);
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need
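[Editorial note: the p2m-pt.c callers that previously indexed the
now-removed pgt[] array pass the level straight through; the two are
equivalent because pgt[i - 1] was PGT_l<i>_page_table. A small
self-contained check, again using stand-in constants rather than Xen's
real PGT_* definitions:

#include <assert.h>

#define PGT_l1_page_table  (1UL << 60) /* stand-ins, as in the sketch above */
#define PGT_l2_page_table  (2UL << 60)
#define PGT_l3_page_table  (3UL << 60)

int main(void)
{
    /* The lookup table this patch removes from p2m-pt.c. */
    static const unsigned long pgt[] = {
        PGT_l1_page_table,
        PGT_l2_page_table,
        PGT_l3_page_table
    };

    /* pgt[i - 1] is exactly the type the patched p2m_alloc_ptp()
     * derives from the numeric level i, for i = 1..3. */
    for ( unsigned int i = 1; i <= 3; i++ )
        assert(pgt[i - 1] == PGT_l1_page_table * i);

    return 0;
}
]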