[Xen-devel] [PATCH v2 07/10] x86/SVM: Add vcpu scheduling support for AVIC
Add hooks to manage AVIC data structures during vcpu scheduling.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Jan Beulich <JBeulich@xxxxxxxx>
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/avic.c | 78 +++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c  | 10 ++++++
 2 files changed, 88 insertions(+)

diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
index c0b7151..6351c8e 100644
--- a/xen/arch/x86/hvm/svm/avic.c
+++ b/xen/arch/x86/hvm/svm/avic.c
@@ -73,6 +73,79 @@ avic_get_phy_apic_id_ent(const struct vcpu *v, unsigned int index)
     return &avic_phy_apic_id_table[index];
 }
 
+static void avic_vcpu_load(struct vcpu *v)
+{
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+    int h_phy_apic_id;
+    struct avic_phy_apic_id_ent entry;
+
+    if ( !s->avic_last_phy_id )
+        return;
+
+    if ( test_bit(_VPF_blocked, &v->pause_flags) )
+        return;
+
+    /*
+     * Note: APIC ID = 0xff is used for broadcast.
+     *       APIC ID > 0xff is reserved.
+     */
+    h_phy_apic_id = cpu_data[v->processor].apicid;
+    ASSERT(h_phy_apic_id < AVIC_PHY_APIC_ID_MAX);
+
+    entry = *(s->avic_last_phy_id);
+    smp_rmb();
+    entry.host_phy_apic_id = h_phy_apic_id;
+    entry.is_running = 1;
+    *(s->avic_last_phy_id) = entry;
+    smp_wmb();
+}
+
+static void avic_vcpu_unload(struct vcpu *v)
+{
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+    struct avic_phy_apic_id_ent entry;
+
+    if ( !svm_avic || !s->avic_last_phy_id )
+        return;
+
+    entry = *(s->avic_last_phy_id);
+    smp_rmb();
+    entry.is_running = 0;
+    *(s->avic_last_phy_id) = entry;
+    smp_wmb();
+}
+
+static void avic_vcpu_resume(struct vcpu *v)
+{
+    struct avic_phy_apic_id_ent entry;
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+    ASSERT(svm_avic_vcpu_enabled(v));
+    ASSERT(s->avic_last_phy_id);
+    ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));
+
+    entry = *(s->avic_last_phy_id);
+    smp_rmb();
+    entry.is_running = 1;
+    *(s->avic_last_phy_id) = entry;
+    smp_wmb();
+}
+
+static void avic_vcpu_block(struct vcpu *v)
+{
+    struct avic_phy_apic_id_ent entry;
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+    ASSERT(svm_avic_vcpu_enabled(v));
+    ASSERT(s->avic_last_phy_id);
+
+    entry = *(s->avic_last_phy_id);
+    smp_rmb();
+    entry.is_running = 0;
+    *(s->avic_last_phy_id) = entry;
+    smp_wmb();
+}
+
 int svm_avic_dom_init(struct domain *d)
 {
     int ret = 0;
@@ -127,6 +200,11 @@ int svm_avic_dom_init(struct domain *d)
 
     spin_lock_init(&d->arch.hvm_domain.svm.avic_ldr_mode_lock);
 
+    d->arch.hvm_domain.pi_ops.pi_switch_to = avic_vcpu_load;
+    d->arch.hvm_domain.pi_ops.pi_switch_from = avic_vcpu_unload;
+    d->arch.hvm_domain.pi_ops.vcpu_block = avic_vcpu_block;
+    d->arch.hvm_domain.pi_ops.pi_do_resume = avic_vcpu_resume;
+
     return ret;
 err_out:
     svm_avic_dom_destroy(d);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index cb5281c..df59b8d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1012,6 +1012,10 @@ static void svm_ctxt_switch_from(struct vcpu *v)
     svm_tsc_ratio_save(v);
 
     svm_sync_vmcb(v);
+
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_from )
+        v->domain->arch.hvm_domain.pi_ops.pi_switch_from(v);
+
     svm_vmload(per_cpu(root_vmcb, cpu));
 
     /* Resume use of ISTs now that the host TR is reinstated. */
@@ -1048,6 +1052,9 @@ static void svm_ctxt_switch_to(struct vcpu *v)
     svm_lwp_load(v);
     svm_tsc_ratio_load(v);
 
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_to )
+        v->domain->arch.hvm_domain.pi_ops.pi_switch_to(v);
+
     if ( cpu_has_rdtscp )
         wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
 }
@@ -1093,6 +1100,9 @@ static void noreturn svm_do_resume(struct vcpu *v)
         vmcb_set_vintr(vmcb, intr);
     }
 
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_do_resume )
+        v->domain->arch.hvm_domain.pi_ops.pi_do_resume(v);
+
     hvm_do_resume(v);
 
     reset_stack_and_jump(svm_asm_do_resume);
-- 
1.9.1
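
For reference when reading the hooks above: each one read-modify-writes a single
64-bit entry of the AVIC physical APIC ID table. A minimal sketch of that
entry's layout, following the description in the AMD64 APM Vol. 2; the field
names other than host_phy_apic_id and is_running (res1, bk_pg_ptr_mfn, res2,
valid) are illustrative here, since the real definition is introduced earlier
in this series:

    #include <stdint.h>

    /*
     * Sketch of one AVIC physical APIC ID table entry (layout per AMD64
     * APM Vol. 2; names beyond host_phy_apic_id/is_running are assumed).
     */
    struct avic_phy_apic_id_ent {
        uint64_t host_phy_apic_id : 8;  /* host core the vcpu runs on     */
        uint64_t res1             : 4;  /* reserved                       */
        uint64_t bk_pg_ptr_mfn    : 40; /* frame of the vAPIC backing page*/
        uint64_t res2             : 10; /* reserved                       */
        uint64_t is_running       : 1;  /* vcpu currently scheduled       */
        uint64_t valid            : 1;  /* backing page pointer is valid  */
    };

While is_running is set, the processor delivers guest IPIs by ringing a
doorbell on the core named in host_phy_apic_id, with no VMEXIT on either side;
while it is clear, the sender instead takes an AVIC-incomplete-IPI VMEXIT so
the hypervisor can wake the target vcpu. That is why the load/resume hooks set
the bit and the unload/block hooks clear it.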