
[PATCH v4] x86: make Viridian support optional


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
  • Date: Fri, 19 Sep 2025 16:31:40 +0000
  • Accept-language: en-US
  • Cc: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Paul Durrant <paul@xxxxxxx>, Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>, Grygorii Strashko <grygorii_strashko@xxxxxxxx>
  • Delivery-date: Fri, 19 Sep 2025 16:32:02 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHcKYLgAWeerUFTx0CrYmKq2dRJwA==
  • Thread-topic: [PATCH v4] x86: make Viridian support optional

From: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>

Add config option VIRIDIAN that covers the Viridian (Hyper-V enlightenments)
code within HVM. Calls to viridian functions are guarded by
is_viridian_domain() and related macros, so that the code can be compiled out
when the option is disabled. Having this option may be beneficial by reducing
the code footprint of systems that do not use Hyper-V.

[grygorii_strashko@xxxxxxxx: fixed NULL pointer deref in
viridian_save_domain_ctxt()]
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
---
changes in v4:
- s/HVM_VIRIDIAN/VIRIDIAN
- add "depends on AMD_SVM || INTEL_VMX"
- add !is_viridian_domain() guard checks in viridian_load_vcpu_ctxt() and
  viridian_load_domain_ctxt()

changes in v3:
- fixed NULL pointer deref in viridian_save_domain_ctxt(), reported for v2,
  which caused v2 to be reverted by commit 1fffcf10cd71 ("Revert "x86: make
  Viridian support optional"")

v3: https://patchwork.kernel.org/project/xen-devel/patch/20250916134114.2214104-1-grygorii_strashko@xxxxxxxx/
v2: https://patchwork.kernel.org/project/xen-devel/patch/20250321092633.3982645-1-Sergiy_Kibrik@xxxxxxxx/

 xen/arch/x86/hvm/Kconfig              | 11 +++++++++++
 xen/arch/x86/hvm/Makefile             |  2 +-
 xen/arch/x86/hvm/hvm.c                | 27 ++++++++++++++++++---------
 xen/arch/x86/hvm/viridian/viridian.c  | 14 ++++++++++----
 xen/arch/x86/hvm/vlapic.c             | 11 +++++++----
 xen/arch/x86/include/asm/hvm/domain.h |  2 ++
 xen/arch/x86/include/asm/hvm/hvm.h    |  3 ++-
 xen/arch/x86/include/asm/hvm/vcpu.h   |  2 ++
 8 files changed, 53 insertions(+), 19 deletions(-)

diff --git a/xen/arch/x86/hvm/Kconfig b/xen/arch/x86/hvm/Kconfig
index 5cb9f2904255..aed799fcb9c2 100644
--- a/xen/arch/x86/hvm/Kconfig
+++ b/xen/arch/x86/hvm/Kconfig
@@ -62,6 +62,17 @@ config ALTP2M
 
          If unsure, stay with defaults.
 
+config VIRIDIAN
+       bool "Hyper-V enlightenments for guests" if EXPERT
+       depends on AMD_SVM || INTEL_VMX
+       default y
+       help
+         Support optimizations for Hyper-V guests such as faster hypercalls,
+         efficient timer and interrupt handling, and enhanced paravirtualized
+         I/O. This improves performance and compatibility of Windows VMs.
+
+         If unsure, say Y.
+
 config MEM_PAGING
        bool "Xen memory paging support (UNSUPPORTED)" if UNSUPPORTED
        depends on VM_EVENT
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index 6ec2c8f2db56..736eb3f966e9 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_AMD_SVM) += svm/
 obj-$(CONFIG_INTEL_VMX) += vmx/
-obj-y += viridian/
+obj-$(CONFIG_VIRIDIAN) += viridian/
 
 obj-y += asid.o
 obj-y += dm.o
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 23bd7f078a1d..95a80369b9b8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -701,9 +701,12 @@ int hvm_domain_initialise(struct domain *d,
     if ( hvm_tsc_scaling_supported )
         d->arch.hvm.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
 
-    rc = viridian_domain_init(d);
-    if ( rc )
-        goto fail2;
+    if ( is_viridian_domain(d) )
+    {
+        rc = viridian_domain_init(d);
+        if ( rc )
+            goto fail2;
+    }
 
     rc = alternative_call(hvm_funcs.domain_initialise, d);
     if ( rc != 0 )
@@ -739,7 +742,8 @@ void hvm_domain_relinquish_resources(struct domain *d)
     if ( hvm_funcs.nhvm_domain_relinquish_resources )
         alternative_vcall(hvm_funcs.nhvm_domain_relinquish_resources, d);
 
-    viridian_domain_deinit(d);
+    if ( is_viridian_domain(d) )
+        viridian_domain_deinit(d);
 
     ioreq_server_destroy_all(d);
 
@@ -1643,9 +1647,12 @@ int hvm_vcpu_initialise(struct vcpu *v)
          && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
         goto fail5;
 
-    rc = viridian_vcpu_init(v);
-    if ( rc )
-        goto fail6;
+    if ( is_viridian_domain(d) )
+    {
+        rc = viridian_vcpu_init(v);
+        if ( rc )
+            goto fail6;
+    }
 
     rc = ioreq_server_add_vcpu_all(d, v);
     if ( rc != 0 )
@@ -1675,13 +1682,15 @@ int hvm_vcpu_initialise(struct vcpu *v)
  fail2:
     hvm_vcpu_cacheattr_destroy(v);
  fail1:
-    viridian_vcpu_deinit(v);
+    if ( is_viridian_domain(d) )
+        viridian_vcpu_deinit(v);
     return rc;
 }
 
 void hvm_vcpu_destroy(struct vcpu *v)
 {
-    viridian_vcpu_deinit(v);
+    if ( is_viridian_domain(v->domain) )
+        viridian_vcpu_deinit(v);
 
     ioreq_server_remove_vcpu_all(v->domain, v);
 
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index c0be24bd2210..5e49fc286d76 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -1116,14 +1116,14 @@ static int cf_check viridian_save_domain_ctxt(
 {
     const struct domain *d = v->domain;
     const struct viridian_domain *vd = d->arch.hvm.viridian;
-    struct hvm_viridian_domain_context ctxt = {
-        .hypercall_gpa = vd->hypercall_gpa.raw,
-        .guest_os_id = vd->guest_os_id.raw,
-    };
+    struct hvm_viridian_domain_context ctxt = {};
 
     if ( !is_viridian_domain(d) )
         return 0;
 
+    ctxt.hypercall_gpa = vd->hypercall_gpa.raw;
+    ctxt.guest_os_id = vd->guest_os_id.raw;
+
     viridian_time_save_domain_ctxt(d, &ctxt);
     viridian_synic_save_domain_ctxt(d, &ctxt);
 
@@ -1136,6 +1136,9 @@ static int cf_check viridian_load_domain_ctxt(
     struct viridian_domain *vd = d->arch.hvm.viridian;
     struct hvm_viridian_domain_context ctxt;
 
+    if ( !is_viridian_domain(d) )
+        return 0;
+
     if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
         return -EINVAL;
 
@@ -1172,6 +1175,9 @@ static int cf_check viridian_load_vcpu_ctxt(
     struct vcpu *v;
     struct hvm_viridian_vcpu_context ctxt;
 
+    if ( !is_viridian_domain(d) )
+        return 0;
+
     if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
     {
         dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 993e972cd71e..79697487ba90 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -426,7 +426,8 @@ void vlapic_EOI_set(struct vlapic *vlapic)
      * priority vector and then recurse to handle the lower priority
      * vector.
      */
-    bool missed_eoi = viridian_apic_assist_completed(v);
+    bool missed_eoi = has_viridian_apic_assist(v->domain) &&
+                      viridian_apic_assist_completed(v);
     int vector;
 
  again:
@@ -442,7 +443,7 @@ void vlapic_EOI_set(struct vlapic *vlapic)
      * NOTE: It is harmless to call viridian_apic_assist_clear() on a
      *       recursion, even though it is not necessary.
      */
-    if ( !missed_eoi )
+    if ( has_viridian_apic_assist(v->domain) && !missed_eoi )
         viridian_apic_assist_clear(v);
 
     vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
@@ -1360,7 +1361,8 @@ int vlapic_has_pending_irq(struct vcpu *v)
      * If so, we need to emulate the EOI here before comparing ISR
      * with IRR.
      */
-    if ( viridian_apic_assist_completed(v) )
+    if ( has_viridian_apic_assist(v->domain) &&
+         viridian_apic_assist_completed(v) )
         vlapic_EOI_set(vlapic);
 
     isr = vlapic_find_highest_isr(vlapic);
@@ -1373,7 +1375,8 @@ int vlapic_has_pending_irq(struct vcpu *v)
     if ( isr >= 0 &&
          (irr & 0xf0) <= (isr & 0xf0) )
     {
-        viridian_apic_assist_clear(v);
+        if ( has_viridian_apic_assist(v->domain) )
+            viridian_apic_assist_clear(v);
         return -1;
     }
 
diff --git a/xen/arch/x86/include/asm/hvm/domain.h b/xen/arch/x86/include/asm/hvm/domain.h
index 333501d5f2ac..95d9336a28f0 100644
--- a/xen/arch/x86/include/asm/hvm/domain.h
+++ b/xen/arch/x86/include/asm/hvm/domain.h
@@ -111,7 +111,9 @@ struct hvm_domain {
     /* hypervisor intercepted msix table */
     struct list_head       msixtbl_list;
 
+#ifdef CONFIG_VIRIDIAN
     struct viridian_domain *viridian;
+#endif
 
     /*
      * TSC value that VCPUs use to calculate their tsc_offset value.
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index f02183691ea6..7312cdd878e1 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -510,7 +510,8 @@ hvm_get_cpl(struct vcpu *v)
     (has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)
 
 #define is_viridian_domain(d) \
-    (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
+    (IS_ENABLED(CONFIG_VIRIDIAN) && \
+     is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
 
 #define is_viridian_vcpu(v) \
     is_viridian_domain((v)->domain)
diff --git a/xen/arch/x86/include/asm/hvm/vcpu.h b/xen/arch/x86/include/asm/hvm/vcpu.h
index 924af890c5b2..9ed9eaff3bc5 100644
--- a/xen/arch/x86/include/asm/hvm/vcpu.h
+++ b/xen/arch/x86/include/asm/hvm/vcpu.h
@@ -176,7 +176,9 @@ struct hvm_vcpu {
     /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
     struct x86_event     inject_event;
 
+#ifdef CONFIG_VIRIDIAN
     struct viridian_vcpu *viridian;
+#endif
 };
 
 #endif /* __ASM_X86_HVM_VCPU_H__ */
-- 
2.34.1



 

