[xen staging-4.11] x86/EPT: ept_set_middle_entry() related adjustments
commit 18be3aabd83fa9f683e8ddb0bca2066dd302c20e
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Jul 7 15:20:10 2020 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Jul 7 15:20:10 2020 +0200

    x86/EPT: ept_set_middle_entry() related adjustments

    ept_split_super_page() wants to further modify the newly allocated
    table, so have ept_set_middle_entry() return the mapped pointer rather
    than tearing it down and then getting re-established right again.

    Similarly ept_next_level() wants to hand back a mapped pointer of the
    next level page, so re-use the one established by ept_set_middle_entry()
    in case that path was taken.

    Pull the setting of suppress_ve ahead of insertion into the higher level
    table, and don't have ept_split_super_page() set the field a 2nd time.

    This is part of XSA-328.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    master commit: 1104288186ee73a7f9bfa41cbaa5bb7611521028
    master date: 2020-07-07 14:36:52 +0200
---
 xen/arch/x86/mm/p2m-ept.c | 41 ++++++++++++++++++-----------------------
 1 file changed, 18 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index ca18e43580..2ec66c21f8 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -228,8 +228,9 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
 #define GUEST_TABLE_SUPER_PAGE  2
 #define GUEST_TABLE_POD_PAGE    3
 
-/* Fill in middle levels of ept table */
-static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
+/* Fill in middle level of ept table; return pointer to mapped new table. */
+static ept_entry_t *ept_set_middle_entry(struct p2m_domain *p2m,
+                                         ept_entry_t *ept_entry)
 {
     mfn_t mfn;
     ept_entry_t *table;
@@ -237,7 +238,12 @@ static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
 
     mfn = p2m_alloc_ptp(p2m, 0);
     if ( mfn_eq(mfn, INVALID_MFN) )
-        return 0;
+        return NULL;
+
+    table = map_domain_page(mfn);
+
+    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
+        table[i].suppress_ve = 1;
 
     ept_entry->epte = 0;
     ept_entry->mfn = mfn_x(mfn);
@@ -249,14 +255,7 @@ static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
 
     ept_entry->suppress_ve = 1;
 
-    table = map_domain_page(mfn);
-
-    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
-        table[i].suppress_ve = 1;
-
-    unmap_domain_page(table);
-
-    return 1;
+    return table;
 }
 
 /* free ept sub tree behind an entry */
@@ -294,10 +293,10 @@ static bool_t ept_split_super_page(struct p2m_domain *p2m,
 
     ASSERT(is_epte_superpage(ept_entry));
 
-    if ( !ept_set_middle_entry(p2m, &new_ept) )
+    table = ept_set_middle_entry(p2m, &new_ept);
+    if ( !table )
         return 0;
 
-    table = map_domain_page(_mfn(new_ept.mfn));
     trunk = 1UL << ((level - 1) * EPT_TABLE_ORDER);
 
     for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
@@ -308,7 +307,6 @@ static bool_t ept_split_super_page(struct p2m_domain *p2m,
         epte->sp = (level > 1);
         epte->mfn += i * trunk;
         epte->snp = (iommu_enabled && iommu_snoop);
-        epte->suppress_ve = 1;
 
         ept_p2m_type_to_flags(p2m, epte, epte->sa_p2mt, epte->access);
 
@@ -347,8 +345,7 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
                           ept_entry_t **table, unsigned long *gfn_remainder,
                           int next_level)
 {
-    unsigned long mfn;
-    ept_entry_t *ept_entry, e;
+    ept_entry_t *ept_entry, *next = NULL, e;
     u32 shift, index;
 
     shift = next_level * EPT_TABLE_ORDER;
@@ -373,19 +370,17 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
         if ( read_only )
             return GUEST_TABLE_MAP_FAILED;
 
-        if ( !ept_set_middle_entry(p2m, ept_entry) )
+        next = ept_set_middle_entry(p2m, ept_entry);
+        if ( !next )
             return GUEST_TABLE_MAP_FAILED;
-        else
-            e = atomic_read_ept_entry(ept_entry); /* Refresh */
+
+        /* e is now stale and hence may not be used anymore below. */
     }
-
-    /* The only time sp would be set here is if we had hit a superpage */
-    if ( is_epte_superpage(&e) )
+    else if ( is_epte_superpage(&e) )
         return GUEST_TABLE_SUPER_PAGE;
 
-    mfn = e.mfn;
     unmap_domain_page(*table);
-    *table = map_domain_page(_mfn(mfn));
+    *table = next ?: map_domain_page(_mfn(e.mfn));
     *gfn_remainder &= (1UL << shift) - 1;
     return GUEST_TABLE_NORMAL_PAGE;
 }
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.11
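
[Editorial note] The core of the change is a common refactoring pattern: the
helper that allocates and initialises a new table hands its still-mapped
result back to the caller instead of tearing the mapping down, and the caller
re-uses that pointer rather than mapping the page a second time. The sketch
below is a standalone illustration only and not Xen code: entry_t,
set_middle(), next_level(), PRESENT and SUPPRESS are invented stand-ins for
ept_entry_t, ept_set_middle_entry(), ept_next_level() and the suppress_ve
field, and plain calloc() stands in for p2m_alloc_ptp()/map_domain_page().

/*
 * Standalone illustration only -- NOT Xen code.  All names here are
 * made-up analogues of the functions and fields touched by the patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define N_ENTRIES 8            /* stand-in for EPT_PAGETABLE_ENTRIES */
#define PRESENT   0x1u
#define SUPPRESS  0x2u         /* stand-in for suppress_ve */

typedef struct entry {
    unsigned int flags;        /* stand-in for the epte bit fields      */
    struct entry *next;        /* stand-in for the MFN of the sub-table */
} entry_t;

/*
 * Fill in a middle-level entry and hand the new, still "mapped" table
 * back to the caller: the table is fully initialised *before* the parent
 * entry is made to point at it, and NULL is returned on failure.
 */
static entry_t *set_middle(entry_t *parent)
{
    entry_t *table = calloc(N_ENTRIES, sizeof(*table));
    unsigned int i;

    if ( !table )
        return NULL;

    for ( i = 0; i < N_ENTRIES; i++ )
        table[i].flags = SUPPRESS;     /* initialise every entry first */

    parent->next = table;              /* ... then publish the table   */
    parent->flags = PRESENT;

    return table;                      /* caller may keep using it     */
}

/*
 * Descend one level: if the entry was missing, re-use the pointer
 * set_middle() handed back instead of looking the sub-table up again.
 */
static entry_t *next_level(entry_t *parent)
{
    entry_t *next = NULL;

    if ( !(parent->flags & PRESENT) )
    {
        next = set_middle(parent);
        if ( !next )
            return NULL;               /* "map failed" analogue */
    }

    return next ? next : parent->next;
}

int main(void)
{
    entry_t root = { 0 };
    entry_t *first  = next_level(&root);   /* allocates the table    */
    entry_t *second = next_level(&root);   /* re-uses the same table */

    printf("same table: %s\n", first == second ? "yes" : "no");
    free(first);
    return 0;
}

Note the ordering the patch establishes and the sketch mirrors: the new
table's entries are initialised (suppress_ve set, in the real code) before
the parent entry is made to point at it, so the table is never reachable in
a partially initialised state. The patch itself uses GCC's "x ?: y"
shorthand for the re-use in ept_next_level(); the sketch spells it out as an
ordinary conditional expression.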