[Xen-devel] [PATCH v2 1/3] x86/p2m-pt: simplify p2m_next_level()
Calculate entry PFN and flags just once. Convert the two successive main
if()-s to an if/else-if chain. Restrict variable scope where reasonable.
Take the opportunity and also make the induction variable unsigned.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Re-do mostly from scratch following review feedback.

Note: I have trouble seeing how the old code worked, when the 2M page
shattering path specified neither read nor write permission for the
IOMMU. Am I overlooking a reason why this was (and should remain) so?

--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -191,18 +191,18 @@ p2m_next_level(struct p2m_domain *p2m, v
                unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
                u32 max, unsigned long type, bool_t unmap)
 {
-    l1_pgentry_t *l1_entry;
-    l1_pgentry_t *p2m_entry;
-    l1_pgentry_t new_entry;
+    l1_pgentry_t *p2m_entry, new_entry;
     void *next;
-    int i;
+    unsigned int flags;
 
     if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
                                       shift, max)) )
         return -ENOENT;
 
+    flags = l1e_get_flags(*p2m_entry);
+
     /* PoD/paging: Not present doesn't imply empty. */
-    if ( !l1e_get_flags(*p2m_entry) )
+    if ( !flags )
     {
         struct page_info *pg;
 
@@ -231,70 +231,61 @@ p2m_next_level(struct p2m_domain *p2m, v
             break;
         }
     }
-
-    ASSERT(l1e_get_flags(*p2m_entry) & (_PAGE_PRESENT|_PAGE_PSE));
-
-    /* split 1GB pages into 2MB pages */
-    if ( type == PGT_l2_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
+    else if ( flags & _PAGE_PSE )
     {
-        unsigned long flags, pfn;
+        /* Split superpages pages into smaller ones. */
+        unsigned long pfn = l1e_get_pfn(*p2m_entry);
         struct page_info *pg;
+        l1_pgentry_t *l1_entry;
+        unsigned int i, level;
 
-        pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
-        if ( pg == NULL )
-            return -ENOMEM;
-
-        flags = l1e_get_flags(*p2m_entry);
-        pfn = l1e_get_pfn(*p2m_entry);
-
-        l1_entry = __map_domain_page(pg);
-        for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
+        switch ( type )
         {
-            new_entry = l1e_from_pfn(pfn | (i * L1_PAGETABLE_ENTRIES), flags);
-            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
-            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
-        }
-        unmap_domain_page(l1_entry);
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW); /* disable PSE */
-        p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
-        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
-    }
+        case PGT_l2_page_table:
+            level = 2;
+            break;
 
+        case PGT_l1_page_table:
+            /*
+             * New splintered mappings inherit the flags of the old superpage,
+             * with a little reorganisation for the _PAGE_PSE_PAT bit.
+             */
+            if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
+                pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
+            else
+                flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
 
-    /* split single 2MB large page into 4KB page in P2M table */
-    if ( type == PGT_l1_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
-    {
-        unsigned long flags, pfn;
-        struct page_info *pg;
+            level = 1;
+            break;
 
-        pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
+        default:
+            ASSERT_UNREACHABLE();
+            return -EINVAL;
+        }
+
+        pg = p2m_alloc_ptp(p2m, type);
         if ( pg == NULL )
             return -ENOMEM;
 
-        /* New splintered mappings inherit the flags of the old superpage,
-         * with a little reorganisation for the _PAGE_PSE_PAT bit. */
-        flags = l1e_get_flags(*p2m_entry);
-        pfn = l1e_get_pfn(*p2m_entry);
-        if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
-            pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
-        else
-            flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
-
         l1_entry = __map_domain_page(pg);
-        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+
+        for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ )
         {
-            new_entry = l1e_from_pfn(pfn | i, flags);
-            p2m_add_iommu_flags(&new_entry, 0, 0);
-            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 1);
+            new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
+                                     flags);
+            p2m_add_iommu_flags(&new_entry, level - 1,
+                                IOMMUF_readable|IOMMUF_writable);
+            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
        }
+
         unmap_domain_page(l1_entry);
-
         new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), P2M_BASE_FLAGS | _PAGE_RW);
-        p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
-        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
+        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
+    else
+        ASSERT(flags & _PAGE_PRESENT);
 
     next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
     if ( unmap )
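[Editorial note, not part of the posted patch: the _PAGE_PSE_PAT reshuffling that the
patch comment describes can be shown in isolation. The snippet below is an illustrative
sketch only, not Xen code; the MY_* macro names and the sample PFN/flag values are
invented for the example and merely mirror the architectural bit positions (bit 7 is PSE
in an L2 entry but PAT in an L1 entry; bit 12 is a 2M mapping's PAT bit and coincides
with PFN bit 0 of the entry).]

/* Illustrative sketch only (not Xen code): how the PAT bit moves when a
 * 2M superpage is shattered into 512 4K entries. */
#include <stdio.h>

#define MY_PAGE_PSE     (1u << 7)   /* PSE in an L2 entry; the same bit is PAT in an L1 entry */
#define MY_PAGE_PSE_PAT (1u << 12)  /* PAT of a 2M mapping; overlaps PFN bit 0 of the entry */

int main(void)
{
    /* Example 2M mapping whose PAT bit is set: the PFN read back from the
     * entry therefore has its low bit set. */
    unsigned long pfn = 0x40000 | 1;
    unsigned int flags = MY_PAGE_PSE | 0x63;  /* PSE plus P/RW/A/D */
    unsigned int i;

    if ( pfn & 1 )               /* _PAGE_PSE_PAT was set on the superpage */
        pfn -= 1;                /* drop it from the PFN; PSE stays and acts as PAT at 4K */
    else
        flags &= ~MY_PAGE_PSE;   /* no PAT on the superpage -> 4K entries get no PAT */

    for ( i = 0; i < 4; i++ )    /* first few of the 512 resulting 4K frames */
        printf("l1[%u]: pfn=%#lx flags=%#x\n", i, pfn | i, flags);

    return 0;
}

[Under the same reading, the patch's pfn | (i << ((level - 1) * PAGETABLE_ORDER))
simply steps through the old superpage's frame range: in 4K-frame units when filling an
L1 table (level 1) and in 512-frame (2M) units when filling an L2 table (level 2).]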