[Xen-devel] [PATCH v4 2/4] x86/vcpu: track hvm vcpu number on the system
This number is used to calculate the average vcpus per pcpu ratio.

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
v4:
 - move the place we increase/decrease the hvm vcpu number to
   hvm_vcpu_{initialise, destroy}
---
 xen/arch/x86/hvm/hvm.c        | 6 ++++++
 xen/include/asm-x86/hvm/hvm.h | 3 +++
 2 files changed, 9 insertions(+)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3ed6ec4..6a510b3 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -109,6 +109,9 @@ static const char __initconst warning_hvm_fep[] =
 static bool_t __initdata opt_altp2m_enabled = 0;
 boolean_param("altp2m", opt_altp2m_enabled);
 
+/* Total number of HVM vCPUs on this system */
+atomic_t num_hvm_vcpus;
+
 static int cpu_callback(
     struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
@@ -1512,6 +1515,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     hvm_update_guest_vendor(v);
 
+    atomic_inc(&num_hvm_vcpus);
     return 0;
 
 fail6:
@@ -1530,6 +1534,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
 void hvm_vcpu_destroy(struct vcpu *v)
 {
+    atomic_dec(&num_hvm_vcpus);
+
     viridian_vcpu_deinit(v);
 
     hvm_all_ioreq_servers_remove_vcpu(v->domain, v);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index b687e03..c51bd9f 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -25,6 +25,7 @@
 #include <asm/hvm/asid.h>
 #include <public/domctl.h>
 #include <public/hvm/save.h>
+#include <xen/atomic.h>
 #include <xen/mm.h>
 
 #ifdef CONFIG_HVM_FEP
@@ -233,6 +234,8 @@ extern bool_t hvm_enabled;
 extern bool_t cpu_has_lmsl;
 extern s8 hvm_port80_allowed;
 
+extern atomic_t num_hvm_vcpus;
+
 extern const struct hvm_function_table *start_svm(void);
 extern const struct hvm_function_table *start_vmx(void);
-- 
1.8.3.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
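Note that this patch only maintains the counter; the average vcpus-per-pcpu
ratio mentioned in the commit message would be computed by a consumer added
later in the series. A minimal sketch of such a consumer, assuming the
existing Xen helpers atomic_read(), num_online_cpus() and DIV_ROUND_UP();
the hvm_vcpus_per_pcpu() name is hypothetical and not part of this patch:

    #include <xen/atomic.h>   /* atomic_t, atomic_read() */
    #include <xen/cpumask.h>  /* num_online_cpus() */
    #include <xen/kernel.h>   /* DIV_ROUND_UP() */

    /*
     * Hypothetical consumer (not part of this patch): estimate the
     * average number of HVM vCPUs per online pCPU from num_hvm_vcpus.
     */
    static unsigned int hvm_vcpus_per_pcpu(void)
    {
        /*
         * Snapshot the counter once; vCPU creation/destruction can race
         * with this read, so the result is only an estimate.
         */
        unsigned int vcpus = atomic_read(&num_hvm_vcpus);

        /* Round up so any non-zero vCPU count yields a ratio of at least 1. */
        return DIV_ROUND_UP(vcpus, num_online_cpus());
    }

num_online_cpus() is always at least 1 (the boot CPU), so the division is
safe without a zero check.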