
[RFC PATCH V3 07/11] HV/Vmbus: Initialize VMbus ring buffer for Isolation VM

From: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>

The VMbus ring buffer is shared with the host, so in an
Isolation VM with SNP support it must be accessed through the
extra address space above the shared GPA boundary. This patch
maps the ring buffer into that extra address space via
vmap_pfn(). The host visibility hvcall smears the data in the
ring buffer, so reset the ring buffer memory to zero after
making the visibility hvcall.
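
For reference, a minimal standalone sketch of the wraparound
mapping done below (based on the diff in this patch;
ms_hyperv.shared_gpa_boundary and PAGE_KERNEL_IO usage follow
this series, and hv_map_ring_shared() is a hypothetical helper
name used only for illustration):

#include <linux/mm.h>       /* page_to_pfn() */
#include <linux/slab.h>     /* kcalloc(), kfree() */
#include <linux/string.h>   /* memset() */
#include <linux/vmalloc.h>  /* vmap_pfn() */
#include <asm/mshyperv.h>   /* ms_hyperv.shared_gpa_boundary */

/*
 * Map @page_cnt ring-buffer pages through the extra address space
 * above the shared GPA boundary.  The first page holds struct
 * hv_ring_buffer; the data pages are mapped twice so a packet can
 * wrap around the end of the ring yet stay virtually contiguous.
 */
static void *hv_map_ring_shared(struct page *pages, u32 page_cnt)
{
        u64 gpa = (page_to_pfn(pages) << PAGE_SHIFT) +
                  ms_hyperv.shared_gpa_boundary;
        unsigned long *pfns;
        void *vaddr;
        int i;

        pfns = kcalloc(page_cnt * 2 - 1, sizeof(*pfns), GFP_KERNEL);
        if (!pfns)
                return NULL;

        pfns[0] = gpa >> PAGE_SHIFT;
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                pfns[i + 1] = (gpa >> PAGE_SHIFT) + i % (page_cnt - 1) + 1;

        vaddr = vmap_pfn(pfns, page_cnt * 2 - 1, PAGE_KERNEL_IO);
        kfree(pfns);

        /* The visibility hvcall smears the buffer, so start from zero. */
        if (vaddr)
                memset(vaddr, 0x00, page_cnt * PAGE_SIZE);

        return vaddr;
}

Only page_cnt * PAGE_SIZE bytes need clearing even though
page_cnt * 2 - 1 PFNs are mapped, since the extra mappings alias
the same underlying data pages.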

Signed-off-by: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
---
 drivers/hv/Kconfig        |  1 +
 drivers/hv/channel.c      | 10 +++++
 drivers/hv/hyperv_vmbus.h |  2 +
 drivers/hv/ring_buffer.c  | 84 ++++++++++++++++++++++++++++++---------
 4 files changed, 79 insertions(+), 18 deletions(-)

diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 66c794d92391..a8386998be40 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -7,6 +7,7 @@ config HYPERV
        depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
        select PARAVIRT
        select X86_HV_CALLBACK_VECTOR
+       select VMAP_PFN
        help
          Select this option to run Linux as a Hyper-V client operating
          system.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 01048bb07082..7350da9dbe97 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -707,6 +707,16 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
        if (err)
                goto error_clean_ring;
 
+       err = hv_ringbuffer_post_init(&newchannel->outbound,
+                                     page, send_pages);
+       if (err)
+               goto error_free_gpadl;
+
+       err = hv_ringbuffer_post_init(&newchannel->inbound,
+                                     &page[send_pages], recv_pages);
+       if (err)
+               goto error_free_gpadl;
+
        /* Create and init the channel open message */
        open_info = kzalloc(sizeof(*open_info) +
                           sizeof(struct vmbus_channel_open_channel),
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 40bc0eff6665..15cd23a561f3 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -172,6 +172,8 @@ extern int hv_synic_cleanup(unsigned int cpu);
 /* Interface */
 
 void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
+int hv_ringbuffer_post_init(struct hv_ring_buffer_info *ring_info,
+               struct page *pages, u32 page_cnt);
 
 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 pagecnt, u32 max_pkt_size);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 2aee356840a2..d4f93fca1108 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -17,6 +17,8 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/prefetch.h>
+#include <linux/io.h>
+#include <asm/mshyperv.h>
 
 #include "hyperv_vmbus.h"
 
@@ -179,43 +181,89 @@ void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
        mutex_init(&channel->outbound.ring_buffer_mutex);
 }
 
-/* Initialize the ring buffer. */
-int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
-                      struct page *pages, u32 page_cnt, u32 max_pkt_size)
+int hv_ringbuffer_post_init(struct hv_ring_buffer_info *ring_info,
+                      struct page *pages, u32 page_cnt)
 {
+       u64 physic_addr = page_to_pfn(pages) << PAGE_SHIFT;
+       unsigned long *pfns_wraparound;
+       void *vaddr;
        int i;
-       struct page **pages_wraparound;
 
-       BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
+       if (!hv_isolation_type_snp())
+               return 0;
+
+       physic_addr += ms_hyperv.shared_gpa_boundary;
 
        /*
         * First page holds struct hv_ring_buffer, do wraparound mapping for
         * the rest.
         */
-       pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
+       pfns_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(unsigned long),
                                   GFP_KERNEL);
-       if (!pages_wraparound)
+       if (!pfns_wraparound)
                return -ENOMEM;
 
-       pages_wraparound[0] = pages;
+       pfns_wraparound[0] = physic_addr >> PAGE_SHIFT;
        for (i = 0; i < 2 * (page_cnt - 1); i++)
-               pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
-
-       ring_info->ring_buffer = (struct hv_ring_buffer *)
-               vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
-
-       kfree(pages_wraparound);
+               pfns_wraparound[i + 1] = (physic_addr >> PAGE_SHIFT) +
+                       i % (page_cnt - 1) + 1;
 
-
-       if (!ring_info->ring_buffer)
+       vaddr = vmap_pfn(pfns_wraparound, page_cnt * 2 - 1, PAGE_KERNEL_IO);
+       kfree(pfns_wraparound);
+       if (!vaddr)
                return -ENOMEM;
 
-       ring_info->ring_buffer->read_index =
-               ring_info->ring_buffer->write_index = 0;
+       /* Clean memory after setting host visibility. */
+       memset((void *)vaddr, 0x00, page_cnt * PAGE_SIZE);
+
+       ring_info->ring_buffer = (struct hv_ring_buffer *)vaddr;
+       ring_info->ring_buffer->read_index = 0;
+       ring_info->ring_buffer->write_index = 0;
 
        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;
 
+       return 0;
+}
+
+/* Initialize the ring buffer. */
+int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+                      struct page *pages, u32 page_cnt, u32 max_pkt_size)
+{
+       int i;
+       struct page **pages_wraparound;
+
+       BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
+
+       if (!hv_isolation_type_snp()) {
+               /*
+                * First page holds struct hv_ring_buffer, do wraparound mapping for
+                * the rest.
+                */
+               pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
+                                          GFP_KERNEL);
+               if (!pages_wraparound)
+                       return -ENOMEM;
+
+               pages_wraparound[0] = pages;
+               for (i = 0; i < 2 * (page_cnt - 1); i++)
+                       pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
+
+               ring_info->ring_buffer = (struct hv_ring_buffer *)
+                       vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
+
+               kfree(pages_wraparound);
+
+               if (!ring_info->ring_buffer)
+                       return -ENOMEM;
+
+               ring_info->ring_buffer->read_index =
+                       ring_info->ring_buffer->write_index = 0;
+
+               /* Set the feature bit for enabling flow control. */
+               ring_info->ring_buffer->feature_bits.value = 1;
+       }
+
        ring_info->ring_size = page_cnt << PAGE_SHIFT;
        ring_info->ring_size_div10_reciprocal =
                reciprocal_value(ring_info->ring_size / 10);
-- 
2.25.1
