
[Xen-changelog] [xen-unstable] [IA64][HVM] Add buffer IO mechanism for Xen/VTi domain



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 1e6b0a8a0822a22d313d9058fa0dc10c7a319821
# Parent  21905d2497d6782333e0ebad23b3164db7a0f270
[IA64][HVM] Add buffer IO mechanism for Xen/VTi domain. Current
implementation can accelerate Windows guest's dense IO operations
at boot time.

Signed-off-by: Zhang xiantao <xiantao.zhang@xxxxxxxxx>
---
 tools/libxc/ia64/xc_ia64_hvm_build.c |   10 +++--
 tools/python/xen/xend/image.py       |    2 -
 xen/arch/ia64/vmx/mmio.c             |   70 ++++++++++++++++++++++++++++++++++-
 xen/arch/ia64/vmx/vmx_init.c         |   14 +++++--
 xen/include/asm-ia64/vmx_platform.h  |    2 +
 xen/include/asm-ia64/vmx_vcpu.h      |    1 
 xen/include/public/arch-ia64.h       |    3 +
 7 files changed, 93 insertions(+), 9 deletions(-)
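
For context on what the hunks below implement: the hypervisor side appends
write-only MMIO requests into a shared "buffered iopage" ring at
write_pointer, and the device model drains them at read_pointer, so dense
guest writes (e.g. VGA accesses at boot) no longer need one round trip each.
The following standalone C model is not part of the patch; it is a minimal
sketch of that producer/consumer protocol. The demo_* types, the slot count
and the helper names are illustrative stand-ins for the real
buffered_iopage_t/ioreq_t declared in Xen's public headers.

#include <stdio.h>
#include <string.h>

#define IOREQ_BUFFER_SLOT_NUM 80              /* illustrative slot count */

struct demo_ioreq  { unsigned long addr, data, size; };
struct demo_iopage {                          /* simplified buffered_iopage_t */
    unsigned long read_pointer;               /* advanced by the consumer */
    unsigned long write_pointer;              /* advanced by the producer */
    struct demo_ioreq ioreq[IOREQ_BUFFER_SLOT_NUM];
};

/* Producer: queue one request unless the ring is full; mirrors the logic
 * added to hvm_buffered_io_intercept() in the mmio.c hunk below. */
static int produce(struct demo_iopage *pg, const struct demo_ioreq *req)
{
    if (pg->write_pointer - pg->read_pointer == IOREQ_BUFFER_SLOT_NUM)
        return 0;                             /* full: fall back to the normal path */
    pg->ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM] = *req;
    pg->write_pointer++;                      /* the real code issues wmb() first */
    return 1;
}

/* Consumer: drain everything queued so far, as the device model would do
 * on its side of the shared page. */
static void consume(struct demo_iopage *pg)
{
    while (pg->read_pointer != pg->write_pointer) {
        struct demo_ioreq *r =
            &pg->ioreq[pg->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        printf("buffered write: %lu byte(s) to 0x%lx\n", r->size, r->addr);
        pg->read_pointer++;
    }
}

int main(void)
{
    struct demo_iopage pg;
    struct demo_ioreq req = { 0xA0000UL, 0xff, 1 };   /* a VGA-range write */

    memset(&pg, 0, sizeof(pg));
    if (produce(&pg, &req))
        consume(&pg);
    return 0;
}
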

diff -r 21905d2497d6 -r 1e6b0a8a0822 tools/libxc/ia64/xc_ia64_hvm_build.c
--- a/tools/libxc/ia64/xc_ia64_hvm_build.c      Wed Oct 25 15:25:13 2006 +0100
+++ b/tools/libxc/ia64/xc_ia64_hvm_build.c      Wed Oct 25 15:29:08 2006 +0100
@@ -551,8 +551,9 @@ setup_guest(int xc_handle, uint32_t dom,
             char *image, unsigned long image_size, uint32_t vcpus,
             unsigned int store_evtchn, unsigned long *store_mfn)
 {
-    unsigned long page_array[2];
+    unsigned long page_array[3];
     shared_iopage_t *sp;
+    void *ioreq_buffer_page;
     unsigned long dom_memsize = (memsize << 20);
     DECLARE_DOMCTL;
 
@@ -587,7 +588,7 @@ setup_guest(int xc_handle, uint32_t dom,
 
     /* Retrieve special pages like io, xenstore, etc. */
     if (xc_ia64_get_pfn_list(xc_handle, dom, page_array,
-                             IO_PAGE_START>>PAGE_SHIFT, 2) != 2) {
+                             IO_PAGE_START>>PAGE_SHIFT, 3) != 3) {
         PERROR("Could not get the page frame list");
         goto error_out;
     }
@@ -604,7 +605,10 @@ setup_guest(int xc_handle, uint32_t dom,
 
     memset(sp, 0, PAGE_SIZE);
     munmap(sp, PAGE_SIZE);
-
+    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom,
+                               PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[2]);
+    memset(ioreq_buffer_page,0,PAGE_SIZE);
+    munmap(ioreq_buffer_page, PAGE_SIZE);
     return 0;
 
 error_out:
diff -r 21905d2497d6 -r 1e6b0a8a0822 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Wed Oct 25 15:25:13 2006 +0100
+++ b/tools/python/xen/xend/image.py    Wed Oct 25 15:29:08 2006 +0100
@@ -471,7 +471,7 @@ class IA64_HVM_ImageHandler(HVMImageHand
     def getRequiredAvailableMemory(self, mem_kb):
         page_kb = 16
         # ROM size for guest firmware, ioreq page and xenstore page
-        extra_pages = 1024 + 2
+        extra_pages = 1024 + 3
         return mem_kb + extra_pages * page_kb
 
     def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
diff -r 21905d2497d6 -r 1e6b0a8a0822 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Wed Oct 25 15:25:13 2006 +0100
+++ b/xen/arch/ia64/vmx/mmio.c  Wed Oct 25 15:29:08 2006 +0100
@@ -52,6 +52,70 @@ struct mmio_list *lookup_mmio(u64 gpa, s
 #define PIB_OFST_INTA           0x1E0000
 #define PIB_OFST_XTP            0x1E0008
 
+#define HVM_BUFFERED_IO_RANGE_NR 1
+
+struct hvm_buffered_io_range {
+    unsigned long start_addr;
+    unsigned long length;
+};
+
+static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
+static struct hvm_buffered_io_range
+*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
+{
+    &buffered_stdvga_range
+};
+
+int hvm_buffered_io_intercept(ioreq_t *p)
+{
+    struct vcpu *v = current;
+    spinlock_t  *buffered_io_lock;
+    buffered_iopage_t *buffered_iopage =
+        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
+    unsigned long tmp_write_pointer = 0;
+    int i;
+
+    /* ignore READ ioreq_t! */
+    if ( p->dir == IOREQ_READ )
+        return 0;
+
+    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
+        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
+             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
+                                     hvm_buffered_io_ranges[i]->length )
+            break;
+    }
+
+    if ( i == HVM_BUFFERED_IO_RANGE_NR )
+        return 0;
+
+    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
+    spin_lock(buffered_io_lock);
+
+    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
+         (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
+        /* the queue is full.
+         * send the iopacket through the normal path.
+         * NOTE: The arithimetic operation could handle the situation for
+         * write_pointer overflow.
+         */
+        spin_unlock(buffered_io_lock);
+        return 0;
+    }
+
+    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
+
+    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
+
+    /*make the ioreq_t visible before write_pointer*/
+    wmb();
+    buffered_iopage->write_pointer++;
+
+    spin_unlock(buffered_io_lock);
+
+    return 1;
+}
+
 static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
 
 static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
@@ -156,7 +220,11 @@ static void low_mmio_access(VCPU *vcpu, 
     p->df = 0;
 
     p->io_count++;
-
+    if(hvm_buffered_io_intercept(p)){
+        p->state = STATE_IORESP_READY;
+        vmx_io_assist(v);
+        return ;
+    }else 
     vmx_send_assist_req(v);
     if(dir==IOREQ_READ){ //read
         *val=p->u.data;
diff -r 21905d2497d6 -r 1e6b0a8a0822 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Wed Oct 25 15:25:13 2006 +0100
+++ b/xen/arch/ia64/vmx/vmx_init.c      Wed Oct 25 15:29:08 2006 +0100
@@ -362,8 +362,8 @@ static const io_range_t io_ranges[] = {
        {PIB_START, PIB_SIZE, GPFN_PIB},
 };
 
-/* Reseve 1 page for shared I/O and 1 page for xenstore.  */
-#define VMX_SYS_PAGES  (2 + (GFW_SIZE >> PAGE_SHIFT))
+/* Reseve 1 page for shared I/O ,1 page for xenstore and 1 page for buffer I/O.  */
+#define VMX_SYS_PAGES  (3 + (GFW_SIZE >> PAGE_SHIFT))
 #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
 
 static void vmx_build_physmap_table(struct domain *d)
@@ -424,8 +424,12 @@ static void vmx_build_physmap_table(stru
        mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
        assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
        list_ent = mfn_to_page(mfn)->list.next;
+       ASSERT(list_ent != &d->page_list);
+    
+    mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
+    assign_domain_page(d, BUFFER_IO_PAGE_START, mfn << PAGE_SHIFT);
+    list_ent = mfn_to_page(mfn)->list.next;
        ASSERT(list_ent == &d->page_list);
-
 }
 
 void vmx_setup_platform(struct domain *d)
@@ -436,6 +440,10 @@ void vmx_setup_platform(struct domain *d
 
        d->arch.vmx_platform.shared_page_va =
                (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
+    //For buffered IO requests.
+    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
+    d->arch.hvm_domain.buffered_io_va =
+        (unsigned long)__va(__gpa_to_mpa(d, BUFFER_IO_PAGE_START));
        /* TEMP */
        d->arch.vmx_platform.pib_base = 0xfee00000UL;
 
diff -r 21905d2497d6 -r 1e6b0a8a0822 xen/include/asm-ia64/vmx_platform.h
--- a/xen/include/asm-ia64/vmx_platform.h       Wed Oct 25 15:25:13 2006 +0100
+++ b/xen/include/asm-ia64/vmx_platform.h       Wed Oct 25 15:29:08 2006 +0100
@@ -24,6 +24,8 @@
 #include <asm/hvm/vioapic.h>
 struct mmio_list;
 typedef struct virtual_platform_def {
+    unsigned long          buffered_io_va;
+    spinlock_t             buffered_io_lock;
     unsigned long       shared_page_va;
     unsigned long       pib_base;
     unsigned char       xtp;
diff -r 21905d2497d6 -r 1e6b0a8a0822 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Wed Oct 25 15:25:13 2006 +0100
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Wed Oct 25 15:29:08 2006 +0100
@@ -56,7 +56,6 @@ extern int check_indirect_reg_rsv_fields
 extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
 extern u64 set_isr_ei_ni (VCPU *vcpu);
 extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
-
 
 /* next all for VTI domain APIs definition */
 extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
diff -r 21905d2497d6 -r 1e6b0a8a0822 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Wed Oct 25 15:25:13 2006 +0100
+++ b/xen/include/public/arch-ia64.h    Wed Oct 25 15:29:08 2006 +0100
@@ -79,6 +79,9 @@ typedef unsigned long xen_ulong_t;
 
 #define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
 #define STORE_PAGE_SIZE         PAGE_SIZE
+
+#define BUFFER_IO_PAGE_START (STORE_PAGE_START+PAGE_SIZE)
+#define BUFFER_IO_PAGE_SIZE PAGE_SIZE
 
 #define IO_SAPIC_START   0xfec00000UL
 #define IO_SAPIC_SIZE    0x100000
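
A side note on the "NOTE" in hvm_buffered_io_intercept() above: read_pointer
and write_pointer are free-running unsigned counters, so the queue-full test
(write_pointer - read_pointer == IOREQ_BUFFER_SLOT_NUM) stays correct even
after write_pointer overflows, because unsigned subtraction in C is performed
modulo 2^N. The small standalone snippet below (not part of the patch)
demonstrates the effect; uint8_t is used only so the wrap is easy to see,
whereas the patch uses unsigned long.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t read_pointer  = 250;  /* consumer has retired 250 requests    */
    uint8_t write_pointer = 3;    /* producer issued 259, counter wrapped */

    /* Occupancy is still correct despite the wrap: (3 - 250) mod 256 == 9. */
    uint8_t in_use = (uint8_t)(write_pointer - read_pointer);
    printf("slots in use: %u\n", in_use);     /* prints 9 */
    return 0;
}
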
