[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH V3 01/11] x86/HV: Initialize GHCB page in Isolation VM



From: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>

Hyper-V exposes the GHCB page via the SEV-ES GHCB MSR for SNP guests
to communicate with the hypervisor. Map the GHCB page for all
CPUs so they can read/write the MSR register and submit hvcall requests
via the GHCB.

Signed-off-by: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
---
 arch/x86/hyperv/hv_init.c       | 60 ++++++++++++++++++++++++++++++---
 arch/x86/include/asm/mshyperv.h |  2 ++
 include/asm-generic/mshyperv.h  |  2 ++
 3 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index bb0ae4b5c00f..dc74d01cb859 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -60,6 +60,9 @@ static int hv_cpu_init(unsigned int cpu)
        struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
        void **input_arg;
        struct page *pg;
+       u64 ghcb_gpa;
+       void *ghcb_va;
+       void **ghcb_base;
 
        /* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
        pg = alloc_pages(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL, 
hv_root_partition ? 1 : 0);
@@ -106,6 +109,17 @@ static int hv_cpu_init(unsigned int cpu)
                wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, val);
        }
 
+       if (ms_hyperv.ghcb_base) {
+               rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
+
+               ghcb_va = ioremap_cache(ghcb_gpa, HV_HYP_PAGE_SIZE);
+               if (!ghcb_va)
+                       return -ENOMEM;
+
+               ghcb_base = (void **)this_cpu_ptr(ms_hyperv.ghcb_base);
+               *ghcb_base = ghcb_va;
+       }
+
        return 0;
 }
 
@@ -201,6 +215,7 @@ static int hv_cpu_die(unsigned int cpu)
        unsigned long flags;
        void **input_arg;
        void *pg;
+       void **ghcb_va = NULL;
 
        local_irq_save(flags);
        input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -214,6 +229,13 @@ static int hv_cpu_die(unsigned int cpu)
                *output_arg = NULL;
        }
 
+       if (ms_hyperv.ghcb_base) {
+               ghcb_va = (void **)this_cpu_ptr(ms_hyperv.ghcb_base);
+               if (*ghcb_va)
+                       iounmap(*ghcb_va);
+               *ghcb_va = NULL;
+       }
+
        local_irq_restore(flags);
 
        free_pages((unsigned long)pg, hv_root_partition ? 1 : 0);
@@ -369,6 +391,9 @@ void __init hyperv_init(void)
        u64 guest_id, required_msrs;
        union hv_x64_msr_hypercall_contents hypercall_msr;
        int cpuhp, i;
+       u64 ghcb_gpa;
+       void *ghcb_va;
+       void **ghcb_base;
 
        if (x86_hyper_type != X86_HYPER_MS_HYPERV)
                return;
@@ -429,9 +454,24 @@ void __init hyperv_init(void)
                        VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
                        VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                        __builtin_return_address(0));
-       if (hv_hypercall_pg == NULL) {
-               wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
-               goto remove_cpuhp_state;
+       if (hv_hypercall_pg == NULL)
+               goto clean_guest_os_id;
+
+       if (hv_isolation_type_snp()) {
+               ms_hyperv.ghcb_base = alloc_percpu(void *);
+               if (!ms_hyperv.ghcb_base)
+                       goto clean_guest_os_id;
+
+               rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
+               ghcb_va = ioremap_cache(ghcb_gpa, HV_HYP_PAGE_SIZE);
+               if (!ghcb_va) {
+                       free_percpu(ms_hyperv.ghcb_base);
+                       ms_hyperv.ghcb_base = NULL;
+                       goto clean_guest_os_id;
+               }
+
+               ghcb_base = (void **)this_cpu_ptr(ms_hyperv.ghcb_base);
+               *ghcb_base = ghcb_va;
        }
 
        rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -502,7 +542,8 @@ void __init hyperv_init(void)
        hv_query_ext_cap(0);
        return;
 
-remove_cpuhp_state:
+clean_guest_os_id:
+       wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
        cpuhp_remove_state(cpuhp);
 free_vp_assist_page:
        kfree(hv_vp_assist_page);
@@ -531,6 +572,9 @@ void hyperv_cleanup(void)
         */
        hv_hypercall_pg = NULL;
 
+       if (ms_hyperv.ghcb_base)
+               free_percpu(ms_hyperv.ghcb_base);
+
        /* Reset the hypercall page */
        hypercall_msr.as_uint64 = 0;
        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -615,6 +659,14 @@ bool hv_is_isolation_supported(void)
 }
 EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
 
+DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
+
+bool hv_isolation_type_snp(void)
+{
+       return static_branch_unlikely(&isolation_type_snp);
+}
+EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
+
 /* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
 bool hv_query_ext_cap(u64 cap_query)
 {
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 67ff0d637e55..aeacca7c4da8 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -11,6 +11,8 @@
 #include <asm/paravirt.h>
 #include <asm/mshyperv.h>
 
+DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
+
 typedef int (*hyperv_fill_flush_list_func)(
                struct hv_guest_mapping_flush_list *flush,
                void *data);
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 9a000ba2bb75..3ae56a29594f 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -35,6 +35,7 @@ struct ms_hyperv_info {
        u32 max_lp_index;
        u32 isolation_config_a;
        u32 isolation_config_b;
+       void  __percpu **ghcb_base;
 };
 extern struct ms_hyperv_info ms_hyperv;
 
@@ -224,6 +225,7 @@ bool hv_is_hyperv_initialized(void);
 bool hv_is_hibernation_supported(void);
 enum hv_isolation_type hv_get_isolation_type(void);
 bool hv_is_isolation_supported(void);
+bool hv_isolation_type_snp(void);
 void hyperv_cleanup(void);
 bool hv_query_ext_cap(u64 cap_query);
 #else /* CONFIG_HYPERV */
-- 
2.25.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.