[Xen-devel] [PATCH v2 06/10] x86/SVM: Add vcpu scheduling support for AVIC
From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Add hooks to manage the AVIC data structures during vcpu scheduling. When
a vcpu is scheduled onto a physical core, record the core's APIC ID in the
vcpu's physical APIC ID table entry and set its IsRunning bit; set
IsRunning again when resuming from a block, and clear it when the vcpu is
descheduled or blocks, so that the hardware only attempts direct interrupt
delivery to a running vcpu.
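
For reference, these hooks only ever touch two fields of the vcpu's 64-bit
entry in the AVIC physical APIC ID table. A sketch of the entry layout
(bit positions per the AMD APM; the real definition was introduced earlier
in this series, so the names here are only illustrative):

struct avic_phy_apic_id_ent
{
    u64 host_phy_apic_id : 8;  /* APIC ID of the pCPU the vcpu runs on */
    u64 res1             : 4;
    u64 bk_pg_ptr_mfn    : 40; /* vAPIC backing page frame number */
    u64 res2             : 10;
    u64 is_running       : 1;  /* bit 62: vcpu is scheduled on a pCPU */
    u64 valid            : 1;  /* bit 63: entry is valid */
};

The write ordering in avic_vcpu_load() matters: the new host APIC ID must
be visible before IsRunning is set (hence the smp_wmb()), so that once the
hardware observes IsRunning it also observes the correct destination core
for direct interrupt delivery.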
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@xxxxxxx>
---
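
Reviewer note (below the cut, not for the commit message): the hooks are
registered via the per-domain pi_ops table that VT-d posted interrupts
already use. Judging purely from the call sites in this patch, the hook
table looks roughly like this (the authoritative declaration lives next to
struct hvm_domain in the Xen headers):

struct pi_ops
{
    void (*vcpu_block)(struct vcpu *v);  /* vcpu is about to block */
    void (*switch_from)(struct vcpu *v); /* descheduled on context switch */
    void (*switch_to)(struct vcpu *v);   /* scheduled on context switch */
    void (*do_resume)(struct vcpu *v);   /* about to re-enter the guest */
};

For AVIC, switch_to records the pCPU's APIC ID and sets IsRunning,
do_resume re-sets IsRunning ahead of VMRUN, and switch_from/vcpu_block
clear it so that IPIs aimed at the vcpu trap for software injection
instead of being delivered directly.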
xen/arch/x86/hvm/svm/avic.c | 51 +++++++++++++++++++++++++++++++++++++++++++++
xen/arch/x86/hvm/svm/svm.c | 10 +++++++++
2 files changed, 61 insertions(+)
diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
index 2fba35fe47..7cc10c313a 100644
--- a/xen/arch/x86/hvm/svm/avic.c
+++ b/xen/arch/x86/hvm/svm/avic.c
@@ -36,6 +36,7 @@
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
+#define IS_RUNNING_BIT 62 /* IsRunning bit of a physical APIC ID table entry */
/*
* Note:
* Currently, svm-avic mode is not supported with nested virtualization.
@@ -65,6 +66,51 @@ avic_get_physical_id_entry(struct svm_domain *d, unsigned int index)
return &d->avic_physical_id_table[index];
}
+static void avic_vcpu_load(struct vcpu *v)
+{
+ struct arch_svm_struct *s = &v->arch.hvm_svm;
+ u32 h_phy_apic_id;
+
+ ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));
+
+ /*
+ * Note: APIC ID = 0xff is used for broadcast.
+ * APIC ID > 0xff is reserved.
+ */
+ h_phy_apic_id = cpu_data[v->processor].apicid;
+ ASSERT(h_phy_apic_id < AVIC_PHY_APIC_ID_MAX);
+
+ s->avic_last_phy_id->host_phy_apic_id = h_phy_apic_id;
+ smp_wmb();
+ set_bit(IS_RUNNING_BIT, (u64 *)(s->avic_last_phy_id));
+}
+
+static void avic_vcpu_unload(struct vcpu *v)
+{
+ struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+ clear_bit(IS_RUNNING_BIT, (u64 *)(s->avic_last_phy_id));
+}
+
+static void avic_vcpu_resume(struct vcpu *v)
+{
+ struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+ ASSERT(svm_avic_vcpu_enabled(v));
+ ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));
+
+ set_bit(IS_RUNNING_BIT, (u64 *)(s->avic_last_phy_id));
+}
+
+static void avic_vcpu_block(struct vcpu *v)
+{
+ struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+ ASSERT(svm_avic_vcpu_enabled(v));
+
+ clear_bit(IS_RUNNING_BIT, (u64 *)(s->avic_last_phy_id));
+}
+
int svm_avic_dom_init(struct domain *d)
{
int ret = 0;
@@ -108,6 +154,11 @@ int svm_avic_dom_init(struct domain *d)
spin_lock_init(&d->arch.hvm_domain.svm.avic_dfr_mode_lock);
+ d->arch.hvm_domain.pi_ops.switch_from = avic_vcpu_unload;
+ d->arch.hvm_domain.pi_ops.switch_to = avic_vcpu_load;
+ d->arch.hvm_domain.pi_ops.vcpu_block = avic_vcpu_block;
+ d->arch.hvm_domain.pi_ops.do_resume = avic_vcpu_resume;
+
return ret;
err_out:
svm_avic_dom_destroy(d);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 249059625c..b3e3c84175 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1088,6 +1088,10 @@ static void svm_ctxt_switch_from(struct vcpu *v)
svm_tsc_ratio_save(v);
svm_sync_vmcb(v);
+
+ if ( v->domain->arch.hvm_domain.pi_ops.switch_from )
+ v->domain->arch.hvm_domain.pi_ops.switch_from(v);
+
svm_vmload_pa(per_cpu(host_vmcb, cpu));
/* Resume use of ISTs now that the host TR is reinstated. */
@@ -1120,6 +1124,9 @@ static void svm_ctxt_switch_to(struct vcpu *v)
svm_lwp_load(v);
svm_tsc_ratio_load(v);
+ if ( v->domain->arch.hvm_domain.pi_ops.switch_to )
+ v->domain->arch.hvm_domain.pi_ops.switch_to(v);
+
if ( cpu_has_rdtscp )
wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
}
@@ -1167,6 +1174,9 @@ static void noreturn svm_do_resume(struct vcpu *v)
vmcb_set_vintr(vmcb, intr);
}
+ if ( v->domain->arch.hvm_domain.pi_ops.do_resume )
+ v->domain->arch.hvm_domain.pi_ops.do_resume(v);
+
hvm_do_resume(v);
reset_stack_and_jump(svm_asm_do_resume);
--
2.11.0