[Xen-changelog] [xen-unstable] x86/p2m: Add p2m_change_type_range() operation

# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1309426014 -3600
# Node ID 922e0beae95b436c1474b88847e13775a6053cf9
# Parent  80e00ac43548717deedca484d483ebc41b78d076
x86/p2m: Add p2m_change_type_range() operation

that defers the nested-p2m flush until the entire batch has been
updated.  Use it in the HAP log-dirty operations for tracking VRAM
changes.  This should avoid a lot of unpleasant IPI storms as the
log-dirty code on one CPU repeatedly shoots down the nested p2m of
another CPU.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---

diff -r 80e00ac43548 -r 922e0beae95b xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu Jun 30 10:26:54 2011 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Thu Jun 30 10:26:54 2011 +0100
@@ -58,7 +58,6 @@
 
 static int hap_enable_vram_tracking(struct domain *d)
 {
-    int i;
     struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
 
     if ( !dirty_vram )
@@ -70,8 +69,8 @@
     paging_unlock(d);
 
     /* set l1e entries of P2M table to be read-only. */
-    for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
-        p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
+    p2m_change_type_range(d, dirty_vram->begin_pfn, dirty_vram->end_pfn,
+                          p2m_ram_rw, p2m_ram_logdirty);
 
     flush_tlb_mask(d->domain_dirty_cpumask);
     return 0;
@@ -79,7 +78,6 @@
 
 static int hap_disable_vram_tracking(struct domain *d)
 {
-    int i;
     struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
 
     if ( !dirty_vram )
@@ -90,8 +88,8 @@
     paging_unlock(d);
 
     /* set l1e entries of P2M table with normal mode */
-    for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
-        p2m_change_type(d, i, p2m_ram_logdirty, p2m_ram_rw);
+    p2m_change_type_range(d, dirty_vram->begin_pfn, dirty_vram->end_pfn,
+                          p2m_ram_logdirty, p2m_ram_rw);
 
     flush_tlb_mask(d->domain_dirty_cpumask);
     return 0;
@@ -99,15 +97,14 @@
 
 static void hap_clean_vram_tracking(struct domain *d)
 {
-    int i;
     struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
 
     if ( !dirty_vram )
         return;
 
     /* set l1e entries of P2M table to be read-only. */
-    for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
-        p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
+    p2m_change_type_range(d, dirty_vram->begin_pfn, dirty_vram->end_pfn,
+                          p2m_ram_rw, p2m_ram_logdirty);
 
     flush_tlb_mask(d->domain_dirty_cpumask);
 }
@@ -863,7 +860,8 @@
     paging_lock(d);
 
     old_flags = l1e_get_flags(*p);
-    if ( nestedhvm_enabled(d) && (old_flags & _PAGE_PRESENT) ) {
+    if ( nestedhvm_enabled(d) && (old_flags & _PAGE_PRESENT)
+         && !p2m_get_hostp2m(d)->defer_nested_flush ) {
         /* We are replacing a valid entry so we need to flush nested p2ms,
          * unless the only change is an increase in access rights. */
         mfn_t omfn = _mfn(l1e_get_pfn(*p));

diff -r 80e00ac43548 -r 922e0beae95b xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Jun 30 10:26:54 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu Jun 30 10:26:54 2011 +0100
@@ -537,6 +537,37 @@
     return pt;
 }
 
+/* Modify the p2m type of a range of gfns from ot to nt.
+ * Resets the access permissions. */
+void p2m_change_type_range(struct domain *d,
+                           unsigned long start, unsigned long end,
+                           p2m_type_t ot, p2m_type_t nt)
+{
+    p2m_type_t pt;
+    unsigned long gfn;
+    mfn_t mfn;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
+
+    p2m_lock(p2m);
+    p2m->defer_nested_flush = 1;
+
+    for ( gfn = start; gfn < end; gfn++ )
+    {
+        mfn = gfn_to_mfn_query(d, gfn, &pt);
+        if ( pt == ot )
+            set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
+    }
+
+    p2m->defer_nested_flush = 0;
+    if ( nestedhvm_enabled(d) )
+        p2m_flush_nestedp2m(d);
+    p2m_unlock(p2m);
+}
+
+
+
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn,
                        mfn_t mfn)
 {

diff -r 80e00ac43548 -r 922e0beae95b xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Jun 30 10:26:54 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Jun 30 10:26:54 2011 +0100
@@ -209,6 +209,12 @@
 #define CR3_EADDR (~0ULL)
     uint64_t           cr3;
 
+    /* Host p2m: when this flag is set, don't flush all the nested-p2m
+     * tables on every host-p2m change.  The setter of this flag
+     * is responsible for performing the full flush before releasing the
+     * host p2m's lock. */
+    int                defer_nested_flush;
+
     /* Pages used to construct the p2m */
     struct page_list_head pages;
 
@@ -408,6 +414,11 @@
 void p2m_change_entry_type_global(struct domain *d,
                                   p2m_type_t ot, p2m_type_t nt);
 
+/* Change types across a range of p2m entries (start ... end-1) */
+void p2m_change_type_range(struct domain *d,
+                           unsigned long start, unsigned long end,
+                           p2m_type_t ot, p2m_type_t nt);
+
 /* Compare-exchange the type of a single p2m entry */
 p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
                            p2m_type_t ot, p2m_type_t nt);
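
[Editor's note] To make the deferred-flush protocol above concrete, here is a
minimal standalone sketch of the batching pattern that p2m_change_type_range()
implements. This is not Xen code: every name in it (struct table, set_entry,
change_range, flush) is a hypothetical stand-in for illustration only.

/* Sketch of deferred-flush batching: updating N entries costs one
 * expensive flush instead of N, because the per-entry flush is skipped
 * while a batch is in flight. */
#include <stdio.h>

struct table {
    int entries[16];
    int defer_flush;   /* plays the role of p2m->defer_nested_flush */
    int flush_count;   /* counts the expensive flushes issued */
};

/* Stand-in for the costly nested-p2m shootdown (an IPI in the real code). */
static void flush(struct table *t)
{
    t->flush_count++;
}

/* Single-entry update: flushes immediately unless a batch is in flight. */
static void set_entry(struct table *t, int idx, int val)
{
    t->entries[idx] = val;
    if ( !t->defer_flush )
        flush(t);
}

/* Batched update over [start, end): one flush for the whole range. */
static void change_range(struct table *t, int start, int end, int ot, int nt)
{
    t->defer_flush = 1;
    for ( int i = start; i < end; i++ )
        if ( t->entries[i] == ot )
            set_entry(t, i, nt);
    t->defer_flush = 0;
    flush(t);   /* the setter of the flag performs the full flush */
}

int main(void)
{
    struct table t = { .defer_flush = 0, .flush_count = 0 };

    for ( int i = 0; i < 16; i++ )
        t.entries[i] = 1;

    change_range(&t, 0, 16, 1, 2);

    /* Prints 1; a per-entry loop would have flushed 16 times. */
    printf("flushes issued: %d\n", t.flush_count);
    return 0;
}

The patch has the same shape: the hap.c hunk at @@ -863 skips the per-entry
nested-p2m flush while defer_nested_flush is set, and p2m_change_type_range()
issues the single full p2m_flush_nestedp2m() before releasing the host p2m's
lock.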