[xen master] x86/paging: move and conditionalize flush_tlb() hook
commit 0e1d55d970a7b2def24c75616e5873ea45e708d4
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Mar 16 14:48:23 2023 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Mar 16 14:48:23 2023 +0100

    x86/paging: move and conditionalize flush_tlb() hook

    The hook isn't mode dependent, hence it's misplaced in struct
    paging_mode. (Or alternatively I see no reason why the alloc_page()
    and free_page() hooks don't also live there.) Move it to struct
    paging_domain.

    The hook also is used for HVM guests only, so make respective pieces
    conditional upon CONFIG_HVM.

    While there also add __must_check to the hook declaration, as it's
    imperative that callers deal with getting back "false".

    While moving the shadow implementation, introduce a "curr" local
    variable.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/include/asm/domain.h |  5 ++++
 xen/arch/x86/include/asm/paging.h | 13 ++++-----
 xen/arch/x86/mm/hap/hap.c         |  6 ++--
 xen/arch/x86/mm/shadow/common.c   | 61 +--------------------------------------
 xen/arch/x86/mm/shadow/hvm.c      | 60 ++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/mm/shadow/multi.c    |  1 -
 6 files changed, 74 insertions(+), 72 deletions(-)
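The __must_check addition is the behavioural point to note: shadow_flush_tlb() returns "false" when it cannot take d->hypercall_deadlock_mutex, and a caller that ignores this would silently drop a requested flush. Below is a minimal sketch of a conforming caller, assuming the usual Xen pattern of reporting -ERESTART so the hypercall gets retried; the function itself is illustrative and not part of this patch.

static int illustrative_flush_tlb_all(void)
{
    /*
     * Illustrative only.  A "false" return from paging_flush_tlb()
     * means the trylock in the hook implementation was contended and
     * nothing was flushed; request a hypercall continuation rather
     * than dropping the flush.
     */
    return paging_flush_tlb(NULL /* all vCPUs */) ? 0 : -ERESTART;
}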
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index ff9cdb56e8..7bc126587d 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -237,6 +237,11 @@ struct paging_domain {
     void (*free_page)(struct domain *d, struct page_info *pg);
 
     void (*update_paging_modes)(struct vcpu *v);
+
+#ifdef CONFIG_HVM
+    /* Flush selected vCPUs TLBs. NULL for all. */
+    bool __must_check (*flush_tlb)(const unsigned long *vcpu_bitmap);
+#endif
 };
 
 struct paging_vcpu {
diff --git a/xen/arch/x86/include/asm/paging.h b/xen/arch/x86/include/asm/paging.h
index e5e469c304..f75e1b771a 100644
--- a/xen/arch/x86/include/asm/paging.h
+++ b/xen/arch/x86/include/asm/paging.h
@@ -139,7 +139,6 @@ struct paging_mode {
 #endif
     void          (*update_cr3            )(struct vcpu *v, int do_locking,
                                             bool noflush);
-    bool          (*flush_tlb             )(const unsigned long *vcpu_bitmap);
 
     unsigned int guest_levels;
 
@@ -299,6 +298,12 @@ static inline unsigned long paging_ga_to_gfn_cr3(struct vcpu *v,
                                                  page_order);
 }
 
+/* Flush selected vCPUs TLBs. NULL for all. */
+static inline bool paging_flush_tlb(const unsigned long *vcpu_bitmap)
+{
+    return current->domain->arch.paging.flush_tlb(vcpu_bitmap);
+}
+
 #endif /* CONFIG_HVM */
 
 /* Update all the things that are derived from the guest's CR3.
@@ -407,12 +412,6 @@ static always_inline unsigned int paging_max_paddr_bits(const struct domain *d)
     return bits;
 }
 
-/* Flush selected vCPUs TLBs. NULL for all. */
-static inline bool paging_flush_tlb(const unsigned long *vcpu_bitmap)
-{
-    return paging_get_hostmode(current)->flush_tlb(vcpu_bitmap);
-}
-
 #endif /* XEN_PAGING_H */
 
 /*
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 5b200e1e72..16e9c3325f 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -445,6 +445,7 @@ static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
 /************************************************/
 
 static void cf_check hap_update_paging_modes(struct vcpu *v);
+static bool cf_check flush_tlb(const unsigned long *vcpu_bitmap);
 
 void hap_domain_init(struct domain *d)
 {
@@ -458,6 +459,7 @@ void hap_domain_init(struct domain *d)
     paging_log_dirty_init(d, &hap_ops);
 
     d->arch.paging.update_paging_modes = hap_update_paging_modes;
+    d->arch.paging.flush_tlb = flush_tlb;
 }
 
 /* return 0 for success, -errno for failure */
@@ -847,7 +849,6 @@ static const struct paging_mode hap_paging_real_mode = {
     .gva_to_gfn     = hap_gva_to_gfn_real_mode,
     .p2m_ga_to_gfn  = hap_p2m_ga_to_gfn_real_mode,
     .update_cr3     = hap_update_cr3,
-    .flush_tlb      = flush_tlb,
     .guest_levels   = 1
 };
 
@@ -857,7 +858,6 @@ static const struct paging_mode hap_paging_protected_mode = {
     .gva_to_gfn     = hap_gva_to_gfn_2_levels,
     .p2m_ga_to_gfn  = hap_p2m_ga_to_gfn_2_levels,
     .update_cr3     = hap_update_cr3,
-    .flush_tlb      = flush_tlb,
     .guest_levels   = 2
 };
 
@@ -867,7 +867,6 @@ static const struct paging_mode hap_paging_pae_mode = {
     .gva_to_gfn     = hap_gva_to_gfn_3_levels,
     .p2m_ga_to_gfn  = hap_p2m_ga_to_gfn_3_levels,
     .update_cr3     = hap_update_cr3,
-    .flush_tlb      = flush_tlb,
     .guest_levels   = 3
 };
 
@@ -877,7 +876,6 @@ static const struct paging_mode hap_paging_long_mode = {
     .gva_to_gfn     = hap_gva_to_gfn_4_levels,
     .p2m_ga_to_gfn  = hap_p2m_ga_to_gfn_4_levels,
     .update_cr3     = hap_update_cr3,
-    .flush_tlb      = flush_tlb,
     .guest_levels   = 4
 };
 
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 9a00321457..a9260f5711 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -106,6 +106,7 @@ int shadow_domain_init(struct domain *d)
     d->arch.paging.shadow.oos_active = 0;
 #endif
 #ifdef CONFIG_HVM
+    d->arch.paging.flush_tlb = shadow_flush_tlb;
     d->arch.paging.shadow.pagetable_dying_op = 0;
 #endif
 
@@ -3090,66 +3091,6 @@ static void cf_check sh_clean_dirty_bitmap(struct domain *d)
     paging_unlock(d);
 }
 
-
-static bool flush_vcpu(const struct vcpu *v, const unsigned long *vcpu_bitmap)
-{
-    return !vcpu_bitmap || test_bit(v->vcpu_id, vcpu_bitmap);
-}
-
-/* Flush TLB of selected vCPUs. NULL for all. */
-bool cf_check shadow_flush_tlb(const unsigned long *vcpu_bitmap)
-{
-    static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
-    cpumask_t *mask = &this_cpu(flush_cpumask);
-    struct domain *d = current->domain;
-    struct vcpu *v;
-
-    /* Avoid deadlock if more than one vcpu tries this at the same time. */
-    if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
-        return false;
-
-    /* Pause all other vcpus. */
-    for_each_vcpu ( d, v )
-        if ( v != current && flush_vcpu(v, vcpu_bitmap) )
-            vcpu_pause_nosync(v);
-
-    /* Now that all VCPUs are signalled to deschedule, we wait... */
-    for_each_vcpu ( d, v )
-        if ( v != current && flush_vcpu(v, vcpu_bitmap) )
-            while ( !vcpu_runnable(v) && v->is_running )
-                cpu_relax();
-
-    /* All other vcpus are paused, safe to unlock now. */
-    spin_unlock(&d->hypercall_deadlock_mutex);
-
-    cpumask_clear(mask);
-
-    /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
-    for_each_vcpu ( d, v )
-    {
-        unsigned int cpu;
-
-        if ( !flush_vcpu(v, vcpu_bitmap) )
-            continue;
-
-        paging_update_cr3(v, false);
-
-        cpu = read_atomic(&v->dirty_cpu);
-        if ( is_vcpu_dirty_cpu(cpu) )
-            __cpumask_set_cpu(cpu, mask);
-    }
-
-    /* Flush TLBs on all CPUs with dirty vcpu state. */
-    guest_flush_tlb_mask(d, mask);
-
-    /* Done. */
-    for_each_vcpu ( d, v )
-        if ( v != current && flush_vcpu(v, vcpu_bitmap) )
-            vcpu_unpause(v);
-
-    return true;
-}
-
 /**************************************************************************/
 /* Shadow-control XEN_DOMCTL dispatcher */
 
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index 88c3c16322..e2ee1c7705 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -688,6 +688,66 @@ static void sh_emulate_unmap_dest(struct vcpu *v, void *addr,
     atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
 }
 
+static bool flush_vcpu(const struct vcpu *v, const unsigned long *vcpu_bitmap)
+{
+    return !vcpu_bitmap || test_bit(v->vcpu_id, vcpu_bitmap);
+}
+
+/* Flush TLB of selected vCPUs. NULL for all. */
+bool cf_check shadow_flush_tlb(const unsigned long *vcpu_bitmap)
+{
+    static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+    cpumask_t *mask = &this_cpu(flush_cpumask);
+    const struct vcpu *curr = current;
+    struct domain *d = curr->domain;
+    struct vcpu *v;
+
+    /* Avoid deadlock if more than one vcpu tries this at the same time. */
+    if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+        return false;
+
+    /* Pause all other vcpus. */
+    for_each_vcpu ( d, v )
+        if ( v != curr && flush_vcpu(v, vcpu_bitmap) )
+            vcpu_pause_nosync(v);
+
+    /* Now that all VCPUs are signalled to deschedule, we wait... */
+    for_each_vcpu ( d, v )
+        if ( v != curr && flush_vcpu(v, vcpu_bitmap) )
+            while ( !vcpu_runnable(v) && v->is_running )
+                cpu_relax();
+
+    /* All other vcpus are paused, safe to unlock now. */
+    spin_unlock(&d->hypercall_deadlock_mutex);
+
+    cpumask_clear(mask);
+
+    /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+    for_each_vcpu ( d, v )
+    {
+        unsigned int cpu;
+
+        if ( !flush_vcpu(v, vcpu_bitmap) )
+            continue;
+
+        paging_update_cr3(v, false);
+
+        cpu = read_atomic(&v->dirty_cpu);
+        if ( is_vcpu_dirty_cpu(cpu) )
+            __cpumask_set_cpu(cpu, mask);
+    }
+
+    /* Flush TLBs on all CPUs with dirty vcpu state. */
+    guest_flush_tlb_mask(d, mask);
+
+    /* Done. */
+    for_each_vcpu ( d, v )
+        if ( v != curr && flush_vcpu(v, vcpu_bitmap) )
+            vcpu_unpause(v);
+
+    return true;
+}
+
 mfn_t sh_make_monitor_table(const struct vcpu *v, unsigned int shadow_levels)
 {
     struct domain *d = v->domain;
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index bacfa27e23..415e18a8df 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4167,7 +4167,6 @@ const struct paging_mode sh_paging_mode = {
     .gva_to_gfn                    = sh_gva_to_gfn,
 #endif
     .update_cr3                    = sh_update_cr3,
-    .flush_tlb                     = shadow_flush_tlb,
     .guest_levels                  = GUEST_PAGING_LEVELS,
 #ifdef CONFIG_PV
     .shadow.write_guest_entry      = sh_write_guest_entry,
--
generated by git-patchbot for /home/xen/git/xen.git#master
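A usage note on the hook's "NULL for all" convention: a selective flush passes a bitmap indexed by vcpu_id, which flush_vcpu() above tests with test_bit(). A hedged sketch follows, assuming HVM_MAX_VCPUS as the bitmap bound; apart from paging_flush_tlb() itself, the names below are illustrative and not from the tree.

static bool illustrative_flush_two_vcpus(void)
{
    /* Illustrative only: flush the TLBs of vCPUs 0 and 2. */
    DECLARE_BITMAP(vcpu_bitmap, HVM_MAX_VCPUS) = {};

    __set_bit(0, vcpu_bitmap);
    __set_bit(2, vcpu_bitmap);

    /* "false" means contention; the caller must arrange a retry. */
    return paging_flush_tlb(vcpu_bitmap);
}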