[PATCH v2 3/4] x86/mm: monitor table is HVM-only
Move the per-vCPU field to the HVM sub-structure.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -545,7 +545,7 @@ void write_ptbase(struct vcpu *v)
  * Should be called after CR3 is updated.
  *
  * Uses values found in vcpu->arch.(guest_table and guest_table_user), and
- * for HVM guests, arch.monitor_table and hvm's guest CR3.
+ * for HVM guests, arch.hvm.monitor_table and hvm's guest CR3.
  *
  * Update ref counts to shadow tables appropriately.
  */
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -393,7 +393,7 @@ static mfn_t hap_make_monitor_table(stru
     l4_pgentry_t *l4e;
     mfn_t m4mfn;
 
-    ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
+    ASSERT(pagetable_get_pfn(v->arch.hvm.monitor_table) == 0);
 
     if ( (pg = hap_alloc(d)) == NULL )
         goto oom;
@@ -579,10 +579,10 @@ void hap_teardown(struct domain *d, bool
         {
             if ( paging_get_hostmode(v) && paging_mode_external(d) )
             {
-                mfn = pagetable_get_mfn(v->arch.monitor_table);
+                mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
                 if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
                     hap_destroy_monitor_table(v, mfn);
-                v->arch.monitor_table = pagetable_null();
+                v->arch.hvm.monitor_table = pagetable_null();
            }
        }
    }
@@ -758,10 +758,10 @@ static void hap_update_paging_modes(stru
 
     v->arch.paging.mode = hap_paging_get_mode(v);
 
-    if ( pagetable_is_null(v->arch.monitor_table) )
+    if ( pagetable_is_null(v->arch.hvm.monitor_table) )
     {
         mfn_t mmfn = hap_make_monitor_table(v);
-        v->arch.monitor_table = pagetable_from_mfn(mmfn);
+        v->arch.hvm.monitor_table = pagetable_from_mfn(mmfn);
         make_cr3(v, mmfn);
         hvm_update_host_cr3(v);
     }
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2465,10 +2465,10 @@ static void sh_update_paging_modes(struc
             &SHADOW_INTERNAL_NAME(sh_paging_mode, 2);
     }
 
-    if ( pagetable_is_null(v->arch.monitor_table) )
+    if ( pagetable_is_null(v->arch.hvm.monitor_table) )
     {
         mfn_t mmfn = v->arch.paging.mode->shadow.make_monitor_table(v);
-        v->arch.monitor_table = pagetable_from_mfn(mmfn);
+        v->arch.hvm.monitor_table = pagetable_from_mfn(mmfn);
         make_cr3(v, mmfn);
         hvm_update_host_cr3(v);
     }
@@ -2502,10 +2502,10 @@ static void sh_update_paging_modes(struc
                 return;
             }
 
-            old_mfn = pagetable_get_mfn(v->arch.monitor_table);
-            v->arch.monitor_table = pagetable_null();
+            old_mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
+            v->arch.hvm.monitor_table = pagetable_null();
             new_mfn = v->arch.paging.mode->shadow.make_monitor_table(v);
-            v->arch.monitor_table = pagetable_from_mfn(new_mfn);
+            v->arch.hvm.monitor_table = pagetable_from_mfn(new_mfn);
             SHADOW_PRINTK("new monitor table %"PRI_mfn "\n",
                           mfn_x(new_mfn));
 
@@ -2724,11 +2724,11 @@ void shadow_teardown(struct domain *d, b
 #ifdef CONFIG_HVM
         if ( shadow_mode_external(d) )
         {
-            mfn_t mfn = pagetable_get_mfn(v->arch.monitor_table);
+            mfn_t mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
 
             if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
                 v->arch.paging.mode->shadow.destroy_monitor_table(v, mfn);
-            v->arch.monitor_table = pagetable_null();
+            v->arch.hvm.monitor_table = pagetable_null();
         }
 #endif /* CONFIG_HVM */
     }
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1521,7 +1521,7 @@ sh_make_monitor_table(struct vcpu *v)
 {
     struct domain *d = v->domain;
 
-    ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
+    ASSERT(pagetable_get_pfn(v->arch.hvm.monitor_table) == 0);
 
     /* Guarantee we can get the memory we need */
     shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
@@ -3699,7 +3699,7 @@ sh_update_linear_entries(struct vcpu *v)
 
     /* Don't try to update the monitor table if it doesn't exist */
     if ( !shadow_mode_external(d) ||
-         pagetable_get_pfn(v->arch.monitor_table) == 0 )
+         pagetable_get_pfn(v->arch.hvm.monitor_table) == 0 )
         return;
 
 #if SHADOW_PAGING_LEVELS == 4
@@ -3717,7 +3717,7 @@ sh_update_linear_entries(struct vcpu *v)
     {
         l4_pgentry_t *ml4e;
 
-        ml4e = map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
+        ml4e = map_domain_page(pagetable_get_mfn(v->arch.hvm.monitor_table));
         ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
             l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
                          __PAGE_HYPERVISOR_RW);
@@ -3752,7 +3752,7 @@ sh_update_linear_entries(struct vcpu *v)
         l4_pgentry_t *ml4e;
         l3_pgentry_t *ml3e;
         int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
-        ml4e = map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
+        ml4e = map_domain_page(pagetable_get_mfn(v->arch.hvm.monitor_table));
 
         ASSERT(l4e_get_flags(ml4e[linear_slot]) & _PAGE_PRESENT);
         l3mfn = l4e_get_mfn(ml4e[linear_slot]);
@@ -4087,7 +4087,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
     ///
     if ( shadow_mode_external(d) )
     {
-        make_cr3(v, pagetable_get_mfn(v->arch.monitor_table));
+        make_cr3(v, pagetable_get_mfn(v->arch.hvm.monitor_table));
     }
 #if SHADOW_PAGING_LEVELS == 4
     else // not shadow_mode_external...
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -583,7 +583,6 @@ struct arch_vcpu
     /* guest_table holds a ref to the page, and also a type-count unless
      * shadow refcounts are in use */
     pagetable_t shadow_table[4];        /* (MFN) shadow(s) of guest */
-    pagetable_t monitor_table;          /* (MFN) hypervisor PT (for HVM) */
     unsigned long cr3;                  /* (MA) value to install in HW CR3 */
 
     /*
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -176,6 +176,9 @@ struct hvm_vcpu {
         uint16_t p2midx;
     } fast_single_step;
 
+    /* (MFN) hypervisor page table */
+    pagetable_t monitor_table;
+
     struct hvm_vcpu_asid n1asid;
 
     u64 msr_tsc_adjust;
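For readers skimming the hunks, a minimal sketch (not part of the patch) of the net effect on call sites. The helper name below is made up purely for illustration; the real call sites in the HAP/shadow code above open-code the access.

/*
 * Sketch only: after this change the monitor table lives in the HVM
 * sub-structure, so HVM-only code reaches it via
 * v->arch.hvm.monitor_table instead of v->arch.monitor_table.
 */
static inline mfn_t vcpu_monitor_table_mfn(const struct vcpu *v)
{
    return pagetable_get_mfn(v->arch.hvm.monitor_table);
}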