[PATCH 07/16] x86/P2M: p2m_{alloc,free}_ptp() and p2m_alloc_table() are HVM-only
This also includes the two p2m-related fields.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -94,7 +94,9 @@ static int p2m_initialise(struct domain
     int ret = 0;
 
     mm_rwlock_init(&p2m->lock);
+#ifdef CONFIG_HVM
     INIT_PAGE_LIST_HEAD(&p2m->pages);
+#endif
 
     p2m->domain = d;
     p2m->default_access = p2m_access_rwx;
@@ -628,6 +630,7 @@ struct page_info *p2m_get_page_from_gfn(
 }
 
 #ifdef CONFIG_HVM
+
 /* Returns: 0 for success, -errno for failure */
 int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
@@ -667,7 +670,6 @@ int p2m_set_entry(struct p2m_domain
 
     return rc;
 }
-#endif
 
 mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level)
 {
@@ -746,6 +748,8 @@ int p2m_alloc_table(struct p2m_domain *p
     return 0;
 }
 
+#endif /* CONFIG_HVM */
+
 /*
  * hvm fixme: when adding support for pvh non-hardware domains, this path must
  * cleanup any foreign p2m types (release refcnts on them).
@@ -754,7 +758,9 @@ void p2m_teardown(struct p2m_domain *p2m
 /* Return all the p2m pages to Xen.
  * We know we don't have any extra mappings to these pages */
 {
+#ifdef CONFIG_HVM
     struct page_info *pg;
+#endif
     struct domain *d;
 
     if ( p2m == NULL )
@@ -763,11 +769,16 @@ void p2m_teardown(struct p2m_domain *p2m
     d = p2m->domain;
 
     p2m_lock(p2m);
+
     ASSERT(atomic_read(&d->shr_pages) == 0);
+
+#ifdef CONFIG_HVM
     p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         d->arch.paging.free_page(d, pg);
+#endif
+
     p2m_unlock(p2m);
 }
 
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2700,8 +2700,10 @@ int shadow_enable(struct domain *d, u32
  out_locked:
     paging_unlock(d);
 out_unlocked:
+#ifdef CONFIG_HVM
     if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
         p2m_teardown(p2m);
+#endif
     if ( rv != 0 && pg != NULL )
     {
         pg->count_info &= ~PGC_count_mask;
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -339,12 +339,14 @@ static uint64_t domain_pgd_maddr(struct
 
     ASSERT(spin_is_locked(&hd->arch.mapping_lock));
 
+#ifdef CONFIG_HVM
     if ( iommu_use_hap_pt(d) )
     {
         pagetable_t pgt = p2m_get_pagetable(p2m_get_hostp2m(d));
 
         return pagetable_get_paddr(pgt);
     }
+#endif
 
     if ( !hd->arch.vtd.pgd_maddr )
     {
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -202,9 +202,6 @@ struct p2m_domain {
     /* Lock that protects updates to the p2m */
     mm_rwlock_t lock;
 
-    /* Shadow translated domain: p2m mapping */
-    pagetable_t phys_table;
-
     /*
      * Same as a domain's dirty_cpumask but limited to
      * this p2m and those physical cpus whose vcpu's are in
@@ -223,9 +220,6 @@
      */
     p2m_access_t default_access;
 
-    /* Pages used to construct the p2m */
-    struct page_list_head pages;
-
     /* Host p2m: Log-dirty ranges registered for the domain. */
     struct rangeset *logdirty_ranges;
 
@@ -233,6 +227,12 @@
     bool global_logdirty;
 
 #ifdef CONFIG_HVM
+    /* Translated domain: p2m mapping */
+    pagetable_t phys_table;
+
+    /* Pages used to construct the p2m */
+    struct page_list_head pages;
+
     /* Alternate p2m: count of vcpu's currently using this p2m. */
     atomic_t active_vcpus;
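For readers skimming the archive: putting the p2m.c hunks together, the net shape of p2m_teardown() after this patch would look roughly as below. This is a sketch reconstructed from the hunks above, not a verbatim copy of the resulting tree; the point is that both the p2m page-table fields (phys_table, pages) and the code touching them are now compiled out for !HVM builds.

void p2m_teardown(struct p2m_domain *p2m)
/* Return all the p2m pages to Xen.
 * We know we don't have any extra mappings to these pages */
{
#ifdef CONFIG_HVM
    struct page_info *pg;   /* only needed when a p2m page list exists */
#endif
    struct domain *d;

    if ( p2m == NULL )
        return;

    d = p2m->domain;

    p2m_lock(p2m);

    ASSERT(atomic_read(&d->shr_pages) == 0);

#ifdef CONFIG_HVM
    /* phys_table and pages are now HVM-only struct fields. */
    p2m->phys_table = pagetable_null();

    while ( (pg = page_list_remove_head(&p2m->pages)) )
        d->arch.paging.free_page(d, pg);
#endif

    p2m_unlock(p2m);
}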