[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] x86/P2M: simplify write_p2m_entry()
commit 3131c990b737f9892badd3184115829f05ca1edc Author: Jan Beulich <jbeulich@xxxxxxxx> AuthorDate: Fri Apr 25 12:07:22 2014 +0200 Commit: Jan Beulich <jbeulich@xxxxxxxx> CommitDate: Fri Apr 25 12:07:22 2014 +0200 x86/P2M: simplify write_p2m_entry() The "table_mfn" parameter really isn't needed anywhere, so it gets dropped. The "struct vcpu *" one was always bogus (as was being made up by paging_write_p2m_entry()), and is not commonly used. It can be easily enough made up in the one place (sh_unshadow_for_p2m_change()) it is needed, and we can otherwise pass "struct domain *" instead, properly reflecting that P2M operations are per-domain. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> Reviewed-by: Tim Deegan <tim@xxxxxxx> --- xen/arch/x86/mm/hap/hap.c | 5 +-- xen/arch/x86/mm/hap/nested_hap.c | 2 +- xen/arch/x86/mm/p2m-pt.c | 46 +++++++++++++++++-------------------- xen/arch/x86/mm/paging.c | 9 ++----- xen/arch/x86/mm/shadow/common.c | 21 +++++++++-------- xen/arch/x86/mm/shadow/private.h | 6 ++-- xen/include/asm-x86/p2m.h | 5 +-- xen/include/asm-x86/paging.h | 11 ++++----- 8 files changed, 48 insertions(+), 57 deletions(-) diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c index 7694e51..a7593e7 100644 --- a/xen/arch/x86/mm/hap/hap.c +++ b/xen/arch/x86/mm/hap/hap.c @@ -683,10 +683,9 @@ static void hap_update_paging_modes(struct vcpu *v) } static void -hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p, - mfn_t table_mfn, l1_pgentry_t new, unsigned int level) +hap_write_p2m_entry(struct domain *d, unsigned long gfn, l1_pgentry_t *p, + l1_pgentry_t new, unsigned int level) { - struct domain *d = v->domain; uint32_t old_flags; bool_t flush_nestedp2m = 0; diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c index 9d8bfc8..137a87c 100644 --- a/xen/arch/x86/mm/hap/nested_hap.c +++ b/xen/arch/x86/mm/hap/nested_hap.c @@ -79,7 +79,7 @@ void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, - 
l1_pgentry_t *p, mfn_t table_mfn, l1_pgentry_t new, unsigned int level) + l1_pgentry_t *p, l1_pgentry_t new, unsigned int level) { struct domain *d = p2m->domain; uint32_t old_flags; diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c index b53db70..83d2fde 100644 --- a/xen/arch/x86/mm/p2m-pt.c +++ b/xen/arch/x86/mm/p2m-pt.c @@ -157,7 +157,7 @@ static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry, /* Returns: 0 for success, -errno for failure */ static int -p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table, +p2m_next_level(struct p2m_domain *p2m, void **table, unsigned long *gfn_remainder, unsigned long gfn, u32 shift, u32 max, unsigned long type) { @@ -186,15 +186,15 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table, switch ( type ) { case PGT_l3_page_table: p2m_add_iommu_flags(&new_entry, 3, IOMMUF_readable|IOMMUF_writable); - p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 4); + p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 4); break; case PGT_l2_page_table: p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable); - p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 3); + p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3); break; case PGT_l1_page_table: p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable); - p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 2); + p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2); break; default: BUG(); @@ -222,14 +222,13 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table, { new_entry = l1e_from_pfn(pfn + (i * L1_PAGETABLE_ENTRIES), flags); p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable); - p2m->write_p2m_entry(p2m, gfn, - l1_entry+i, *table_mfn, new_entry, 2); + p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2); } unmap_domain_page(l1_entry); new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), __PAGE_HYPERVISOR|_PAGE_USER); //disable 
PSE p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable); - p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 3); + p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3); } @@ -257,20 +256,17 @@ p2m_next_level(struct p2m_domain *p2m, mfn_t *table_mfn, void **table, { new_entry = l1e_from_pfn(pfn + i, flags); p2m_add_iommu_flags(&new_entry, 0, 0); - p2m->write_p2m_entry(p2m, gfn, - l1_entry+i, *table_mfn, new_entry, 1); + p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 1); } unmap_domain_page(l1_entry); new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), __PAGE_HYPERVISOR|_PAGE_USER); p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable); - p2m->write_p2m_entry(p2m, gfn, - p2m_entry, *table_mfn, new_entry, 2); + p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2); } - *table_mfn = _mfn(l1e_get_pfn(*p2m_entry)); - next = map_domain_page(mfn_x(*table_mfn)); + next = map_domain_page(l1e_get_pfn(*p2m_entry)); unmap_domain_page(*table); *table = next; @@ -283,8 +279,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma) { /* XXX -- this might be able to be faster iff current->domain == d */ - mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m)); - void *table = map_domain_page(mfn_x(table_mfn)); + void *table; unsigned long i, gfn_remainder = gfn; l1_pgentry_t *p2m_entry; l1_pgentry_t entry_content; @@ -313,7 +308,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t); } - rc = p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn, + table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); + rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn, L4_PAGETABLE_SHIFT - PAGE_SHIFT, L4_PAGETABLE_ENTRIES, PGT_l3_page_table); if ( rc ) @@ -350,7 +346,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, old_mfn = 
l1e_get_pfn(*p2m_entry); } - p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 3); + p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3); /* NB: paging_write_p2m_entry() handles tlb flushes properly */ /* Free old intermediate tables if necessary */ @@ -359,8 +355,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, } else { - rc = p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, - gfn, L3_PAGETABLE_SHIFT - PAGE_SHIFT, + rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn, + L3_PAGETABLE_SHIFT - PAGE_SHIFT, L3_PAGETABLE_ENTRIES, PGT_l2_page_table); if ( rc ) goto out; @@ -368,7 +364,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, if ( page_order == PAGE_ORDER_4K ) { - rc = p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn, + rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn, L2_PAGETABLE_SHIFT - PAGE_SHIFT, L2_PAGETABLE_ENTRIES, PGT_l1_page_table); if ( rc ) @@ -391,7 +387,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, old_mfn = l1e_get_pfn(*p2m_entry); } /* level 1 entry */ - p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 1); + p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1); /* NB: paging_write_p2m_entry() handles tlb flushes properly */ } else if ( page_order == PAGE_ORDER_2M ) @@ -427,7 +423,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, old_mfn = l1e_get_pfn(*p2m_entry); } - p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 2); + p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2); /* NB: paging_write_p2m_entry() handles tlb flushes properly */ /* Free old intermediate tables if necessary */ @@ -661,7 +657,7 @@ static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m, l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE); p2m->write_p2m_entry(p2m, gfn, (l1_pgentry_t *)&l3e[i3], - l3mfn, l1e_content, 3); + l1e_content, 3); continue; } @@ 
-688,7 +684,7 @@ static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m, l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE); p2m->write_p2m_entry(p2m, gfn, (l1_pgentry_t *)&l2e[i2], - l2mfn, l1e_content, 2); + l1e_content, 2); continue; } @@ -707,7 +703,7 @@ static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m, flags = p2m_type_to_flags(nt, _mfn(mfn)); l1e_content = p2m_l1e_from_pfn(mfn, flags); p2m->write_p2m_entry(p2m, gfn, &l1e[i1], - l1mfn, l1e_content, 1); + l1e_content, 1); } unmap_domain_page(l1e); } diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c index ab5eacb..f956aa5 100644 --- a/xen/arch/x86/mm/paging.c +++ b/xen/arch/x86/mm/paging.c @@ -724,18 +724,15 @@ void paging_update_nestedmode(struct vcpu *v) } void paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, - l1_pgentry_t *p, mfn_t table_mfn, - l1_pgentry_t new, unsigned int level) + l1_pgentry_t *p, l1_pgentry_t new, + unsigned int level) { struct domain *d = p2m->domain; struct vcpu *v = current; if ( v->domain != d ) v = d->vcpu ? d->vcpu[0] : NULL; if ( likely(v && paging_mode_enabled(d) && paging_get_hostmode(v) != NULL) ) - { - return paging_get_hostmode(v)->write_p2m_entry(v, gfn, p, table_mfn, - new, level); - } + paging_get_hostmode(v)->write_p2m_entry(d, gfn, p, new, level); else safe_write_pte(p, new); } diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c index 9258d2a..3c803b6 100644 --- a/xen/arch/x86/mm/shadow/common.c +++ b/xen/arch/x86/mm/shadow/common.c @@ -3310,11 +3310,14 @@ static int shadow_test_disable(struct domain *d) * shadow processing jobs. 
*/ -static void sh_unshadow_for_p2m_change(struct vcpu *v, unsigned long gfn, - l1_pgentry_t *p, mfn_t table_mfn, - l1_pgentry_t new, unsigned int level) +static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn, + l1_pgentry_t *p, l1_pgentry_t new, + unsigned int level) { - struct domain *d = v->domain; + struct vcpu *v = current; + + if ( v->domain != d ) + v = d->vcpu ? d->vcpu[0] : NULL; /* The following assertion is to make sure we don't step on 1GB host * page support of HVM guest. */ @@ -3379,18 +3382,16 @@ static void sh_unshadow_for_p2m_change(struct vcpu *v, unsigned long gfn, } void -shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn, - l1_pgentry_t *p, mfn_t table_mfn, - l1_pgentry_t new, unsigned int level) +shadow_write_p2m_entry(struct domain *d, unsigned long gfn, + l1_pgentry_t *p, l1_pgentry_t new, + unsigned int level) { - struct domain *d = v->domain; - paging_lock(d); /* If there are any shadows, update them. But if shadow_teardown() * has already been called then it's not safe to try. 
*/ if ( likely(d->arch.paging.shadow.total_pages != 0) ) - sh_unshadow_for_p2m_change(v, gfn, p, table_mfn, new, level); + sh_unshadow_for_p2m_change(d, gfn, p, new, level); /* Update the entry with new content */ safe_write_pte(p, new); diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h index 58844f1..b778fcf 100644 --- a/xen/arch/x86/mm/shadow/private.h +++ b/xen/arch/x86/mm/shadow/private.h @@ -359,9 +359,9 @@ extern int sh_remove_write_access(struct vcpu *v, mfn_t readonly_mfn, unsigned long fault_addr); /* Functions that atomically write PT/P2M entries and update state */ -void shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn, - l1_pgentry_t *p, mfn_t table_mfn, - l1_pgentry_t new, unsigned int level); +void shadow_write_p2m_entry(struct domain *d, unsigned long gfn, + l1_pgentry_t *p, l1_pgentry_t new, + unsigned int level); int shadow_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn); int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index 6223070..86847e9 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -239,8 +239,7 @@ struct p2m_domain { void (*write_p2m_entry)(struct p2m_domain *p2m, unsigned long gfn, l1_pgentry_t *p, - mfn_t table_mfn, l1_pgentry_t new, - unsigned int level); + l1_pgentry_t new, unsigned int level); long (*audit_p2m)(struct p2m_domain *p2m); /* Default P2M access type for each page in the the domain: new pages, @@ -664,7 +663,7 @@ void p2m_flush(struct vcpu *v, struct p2m_domain *p2m); void p2m_flush_nestedp2m(struct domain *d); void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, - l1_pgentry_t *p, mfn_t table_mfn, l1_pgentry_t new, unsigned int level); + l1_pgentry_t *p, l1_pgentry_t new, unsigned int level); #endif /* _XEN_P2M_H */ diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h index 8dd2a61..9b8f8de 100644 --- 
a/xen/include/asm-x86/paging.h +++ b/xen/include/asm-x86/paging.h @@ -111,9 +111,8 @@ struct paging_mode { unsigned int *page_order); void (*update_cr3 )(struct vcpu *v, int do_locking); void (*update_paging_modes )(struct vcpu *v); - void (*write_p2m_entry )(struct vcpu *v, unsigned long gfn, - l1_pgentry_t *p, mfn_t table_mfn, - l1_pgentry_t new, + void (*write_p2m_entry )(struct domain *d, unsigned long gfn, + l1_pgentry_t *p, l1_pgentry_t new, unsigned int level); int (*write_guest_entry )(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn); @@ -335,9 +334,9 @@ static inline void safe_write_pte(l1_pgentry_t *p, l1_pgentry_t new) * we are writing. */ struct p2m_domain; -void paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, - l1_pgentry_t *p, mfn_t table_mfn, - l1_pgentry_t new, unsigned int level); +void paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, + l1_pgentry_t *p, l1_pgentry_t new, + unsigned int level); /* Called from the guest to indicate that the a process is being * torn down and its pagetables will soon be discarded */ -- generated by git-patchbot for /home/xen/git/xen.git#master _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.