[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH V3 06/11] HV/Vmbus: Add SNP support for VMbus channel initiate message



From: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>

The monitor pages in the CHANNELMSG_INITIATE_CONTACT message are shared
with the host, so it's necessary to use a hypercall to make them visible
to the host. In an Isolation VM with AMD SEV-SNP, the access address
should be in the extra address space, which is above the shared GPA
boundary. So remap these pages into the extra address space
(pa + shared_gpa_boundary).

Signed-off-by: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
---
 drivers/hv/connection.c   | 62 +++++++++++++++++++++++++++++++++++++++
 drivers/hv/hyperv_vmbus.h |  1 +
 2 files changed, 63 insertions(+)

diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 186fd4c8acd4..389adc92f958 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -104,6 +104,12 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo 
*msginfo, u32 version)
 
        msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
        msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+
+       if (hv_is_isolation_supported()) {
+               msg->monitor_page1 += ms_hyperv.shared_gpa_boundary;
+               msg->monitor_page2 += ms_hyperv.shared_gpa_boundary;
+       }
+
        msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
 
        /*
@@ -148,6 +154,29 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo 
*msginfo, u32 version)
                return -ECONNREFUSED;
        }
 
+       if (hv_is_isolation_supported()) {
+               vmbus_connection.monitor_pages_va[0]
+                       = vmbus_connection.monitor_pages[0];
+               vmbus_connection.monitor_pages[0]
+                       = ioremap_cache(msg->monitor_page1, HV_HYP_PAGE_SIZE);
+               if (!vmbus_connection.monitor_pages[0])
+                       return -ENOMEM;
+
+               vmbus_connection.monitor_pages_va[1]
+                       = vmbus_connection.monitor_pages[1];
+               vmbus_connection.monitor_pages[1]
+                       = ioremap_cache(msg->monitor_page2, HV_HYP_PAGE_SIZE);
+               if (!vmbus_connection.monitor_pages[1]) {
+                       vunmap(vmbus_connection.monitor_pages[0]);
+                       return -ENOMEM;
+               }
+
+               memset(vmbus_connection.monitor_pages[0], 0x00,
+                      HV_HYP_PAGE_SIZE);
+               memset(vmbus_connection.monitor_pages[1], 0x00,
+                      HV_HYP_PAGE_SIZE);
+       }
+
        return ret;
 }
 
@@ -159,6 +188,7 @@ int vmbus_connect(void)
        struct vmbus_channel_msginfo *msginfo = NULL;
        int i, ret = 0;
        __u32 version;
+       u64 pfn[2];
 
        /* Initialize the vmbus connection */
        vmbus_connection.conn_state = CONNECTING;
@@ -216,6 +246,16 @@ int vmbus_connect(void)
                goto cleanup;
        }
 
+       if (hv_is_isolation_supported()) {
+               pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
+               pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
+               if (hv_mark_gpa_visibility(2, pfn,
+                               VMBUS_PAGE_VISIBLE_READ_WRITE)) {
+                       ret = -EFAULT;
+                       goto cleanup;
+               }
+       }
+
        msginfo = kzalloc(sizeof(*msginfo) +
                          sizeof(struct vmbus_channel_initiate_contact),
                          GFP_KERNEL);
@@ -282,6 +322,8 @@ int vmbus_connect(void)
 
 void vmbus_disconnect(void)
 {
+       u64 pfn[2];
+
        /*
         * First send the unload request to the host.
         */
@@ -301,6 +343,26 @@ void vmbus_disconnect(void)
                vmbus_connection.int_page = NULL;
        }
 
+       if (hv_is_isolation_supported()) {
+               if (vmbus_connection.monitor_pages_va[0]) {
+                       vunmap(vmbus_connection.monitor_pages[0]);
+                       vmbus_connection.monitor_pages[0]
+                               = vmbus_connection.monitor_pages_va[0];
+                       vmbus_connection.monitor_pages_va[0] = NULL;
+               }
+
+               if (vmbus_connection.monitor_pages_va[1]) {
+                       vunmap(vmbus_connection.monitor_pages[1]);
+                       vmbus_connection.monitor_pages[1]
+                               = vmbus_connection.monitor_pages_va[1];
+                       vmbus_connection.monitor_pages_va[1] = NULL;
+               }
+
+               pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
+               pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
+               hv_mark_gpa_visibility(2, pfn, VMBUS_PAGE_NOT_VISIBLE);
+       }
+
        hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
        hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
        vmbus_connection.monitor_pages[0] = NULL;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 42f3d9d123a1..40bc0eff6665 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -240,6 +240,7 @@ struct vmbus_connection {
         * is child->parent notification
         */
        struct hv_monitor_page *monitor_pages[2];
+       void *monitor_pages_va[2];
        struct list_head chn_msg_list;
        spinlock_t channelmsg_lock;
 
-- 
2.25.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.