
[Xen-changelog] [xen-unstable] merge



# HG changeset patch
# User Ian.Campbell@xxxxxxxxxxxxx
# Node ID b16252dbcb1f99a22fcf4c87a015c47f6019cfd2
# Parent  c1850c659e40042d4ce2bf006d6f37b9ebda307a
# Parent  01043d5438786a7e023ca9236999d0f5bf919e9d
merge
---
 tools/ioemu/patches/series                      |    1 
 tools/ioemu/patches/xen-support-buffered-ioreqs |   77 +++++++++++++++++++++
 tools/ioemu/target-i386-dm/helper2.c            |   88 +++++++++++++++++-------
 tools/ioemu/vl.c                                |    8 +-
 tools/libxc/xc_hvm_build.c                      |   31 ++++++++
 xen/arch/x86/hvm/hvm.c                          |   31 +++++++-
 xen/arch/x86/hvm/intercept.c                    |   66 +++++++++++++++++-
 xen/arch/x86/hvm/platform.c                     |    2 
 xen/arch/x86/hvm/svm/svm.c                      |    3 
 xen/arch/x86/hvm/vmx/vmx.c                      |    3 
 xen/include/asm-x86/e820.h                      |    1 
 xen/include/asm-x86/hvm/domain.h                |    2 
 xen/include/asm-x86/hvm/hvm.h                   |    4 -
 xen/include/asm-x86/hvm/support.h               |    1 
 xen/include/public/hvm/ioreq.h                  |    8 ++
 15 files changed, 292 insertions(+), 34 deletions(-)
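
In brief: this merge introduces a buffered-ioreq path for HVM guests. A normal ioreq forces a synchronous round trip to qemu-dm for every intercepted I/O operation; for high-frequency, write-only targets (here the standard-VGA window) Xen instead appends the request to a one-page shared ring and lets qemu-dm drain it asynchronously. Below is a minimal standalone sketch of that ring protocol; the names mirror the real ones in xen/include/public/hvm/ioreq.h further down, but the code is illustrative, not part of the changeset.

    /* Single-producer/single-consumer ring as introduced by this changeset.
     * Pointers only ever increase and are reduced modulo the slot count
     * when indexing, so the fullness test is plain unsigned subtraction. */
    #define IOREQ_BUFFER_SLOT_NUM 80

    struct ring {
        unsigned long read_pointer;          /* advanced only by the consumer */
        unsigned long write_pointer;         /* advanced only by the producer */
        int slot[IOREQ_BUFFER_SLOT_NUM];     /* stands in for ioreq_t entries */
    };

    /* Producer (Xen: hvm_buffered_io_intercept, under buffered_io_lock). */
    static int ring_put(struct ring *r, int req)
    {
        if (r->write_pointer - r->read_pointer == IOREQ_BUFFER_SLOT_NUM)
            return 0;                        /* full: fall back to the normal path */
        r->slot[r->write_pointer % IOREQ_BUFFER_SLOT_NUM] = req;
        __sync_synchronize();                /* wmb(): publish data before index */
        r->write_pointer++;
        return 1;
    }

    /* Consumer (qemu-dm: __handle_buffered_iopage). */
    static void ring_drain(struct ring *r, void (*handle)(int))
    {
        while (r->read_pointer != r->write_pointer) {
            handle(r->slot[r->read_pointer % IOREQ_BUFFER_SLOT_NUM]);
            __sync_synchronize();            /* mb(): finish the slot before freeing it */
            r->read_pointer++;
        }
    }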

diff -r c1850c659e40 -r b16252dbcb1f tools/ioemu/patches/series
--- a/tools/ioemu/patches/series        Wed Aug 09 11:27:28 2006 +0100
+++ b/tools/ioemu/patches/series        Wed Aug 09 11:29:06 2006 +0100
@@ -39,3 +39,4 @@ qemu-allow-disable-sdl
 qemu-allow-disable-sdl
 qemu-fix-memset-args
 qemu-fix-write-to-disk-synchronous
+xen-support-buffered-ioreqs
diff -r c1850c659e40 -r b16252dbcb1f tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Wed Aug 09 11:27:28 2006 +0100
+++ b/tools/ioemu/target-i386-dm/helper2.c      Wed Aug 09 11:29:06 2006 +0100
@@ -75,6 +75,10 @@ int xc_handle;
 int xc_handle;
 
 shared_iopage_t *shared_page = NULL;
+
+#define BUFFER_IO_MAX_DELAY  100
+buffered_iopage_t *buffered_io_page = NULL;
+QEMUTimer *buffered_io_timer;
 
 /* the evtchn fd for polling */
 int xce_handle = -1;
@@ -419,36 +423,68 @@ void cpu_ioreq_xor(CPUState *env, ioreq_
     req->u.data = tmp1;
 }
 
+void __handle_ioreq(CPUState *env, ioreq_t *req)
+{
+    if (!req->pdata_valid && req->dir == IOREQ_WRITE && req->size != 4)
+       req->u.data &= (1UL << (8 * req->size)) - 1;
+
+    switch (req->type) {
+    case IOREQ_TYPE_PIO:
+        cpu_ioreq_pio(env, req);
+        break;
+    case IOREQ_TYPE_COPY:
+        cpu_ioreq_move(env, req);
+        break;
+    case IOREQ_TYPE_AND:
+        cpu_ioreq_and(env, req);
+        break;
+    case IOREQ_TYPE_OR:
+        cpu_ioreq_or(env, req);
+        break;
+    case IOREQ_TYPE_XOR:
+        cpu_ioreq_xor(env, req);
+        break;
+    default:
+        hw_error("Invalid ioreq type 0x%x\n", req->type);
+    }
+}
+
+void __handle_buffered_iopage(CPUState *env)
+{
+    ioreq_t *req = NULL;
+
+    if (!buffered_io_page)
+        return;
+
+    while (buffered_io_page->read_pointer !=
+           buffered_io_page->write_pointer) {
+        req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
+                                      IOREQ_BUFFER_SLOT_NUM];
+
+        __handle_ioreq(env, req);
+
+        mb();
+        buffered_io_page->read_pointer++;
+    }
+}
+
+void handle_buffered_io(void *opaque)
+{
+    CPUState *env = opaque;
+
+    __handle_buffered_iopage(env);
+    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
+                  qemu_get_clock(rt_clock));
+}
+
 void cpu_handle_ioreq(void *opaque)
 {
     CPUState *env = opaque;
     ioreq_t *req = cpu_get_ioreq();
 
+    handle_buffered_io(env);
     if (req) {
-        if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
-            if (req->size != 4)
-                req->u.data &= (1UL << (8 * req->size))-1;
-        }
-
-        switch (req->type) {
-        case IOREQ_TYPE_PIO:
-            cpu_ioreq_pio(env, req);
-            break;
-        case IOREQ_TYPE_COPY:
-            cpu_ioreq_move(env, req);
-            break;
-        case IOREQ_TYPE_AND:
-            cpu_ioreq_and(env, req);
-            break;
-        case IOREQ_TYPE_OR:
-            cpu_ioreq_or(env, req);
-            break;
-        case IOREQ_TYPE_XOR:
-            cpu_ioreq_xor(env, req);
-            break;
-        default:
-            hw_error("Invalid ioreq type 0x%x\n", req->type);
-        }
+        __handle_ioreq(env, req);
 
         /* No state change if state = STATE_IORESP_HOOK */
         if (req->state == STATE_IOREQ_INPROCESS) {
@@ -465,6 +501,10 @@ int main_loop(void)
     extern int shutdown_requested;
     CPUState *env = cpu_single_env;
     int evtchn_fd = xc_evtchn_fd(xce_handle);
+
+    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
+                                      cpu_single_env);
+    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
 
     qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
 
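Two things drain the ring on the qemu-dm side, both visible above: cpu_handle_ioreq() flushes it on every event-channel notification, and a periodic timer covers bursts of purely buffered writes, which by design raise no notification at all. Condensed, the wiring is:

    /* Buffered requests are posted without an event, so without this timer
     * they could sit in the ring until the next unrelated ioreq arrived. */
    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
                                       cpu_single_env);
    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock)); /* fire now */
    /* handle_buffered_io() then re-arms itself BUFFER_IO_MAX_DELAY (100 ms
     * on rt_clock) into the future after each pass. */
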
diff -r c1850c659e40 -r b16252dbcb1f tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Wed Aug 09 11:27:28 2006 +0100
+++ b/tools/ioemu/vl.c  Wed Aug 09 11:29:06 2006 +0100
@@ -5834,6 +5834,7 @@ int main(int argc, char **argv)
     unsigned long nr_pages;
     xen_pfn_t *page_array;
     extern void *shared_page;
+    extern void *buffered_io_page;
 
     char qemu_dm_logfilename[64];
 
@@ -6378,11 +6379,16 @@ int main(int argc, char **argv)
 
     phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
                                          PROT_READ|PROT_WRITE, page_array,
-                                         nr_pages - 1);
+                                         nr_pages - 3);
     if (phys_ram_base == 0) {
         fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
         exit(-1);
     }
+
+    /* Not yet supported on IA64. */
+    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                       PROT_READ|PROT_WRITE,
+                                       page_array[nr_pages - 3]);
 
     shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                        PROT_READ|PROT_WRITE,
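
For orientation: the top three frames of the guest's pseudo-physical memory are now special pages, which is why the RAM mapping above stops at nr_pages - 3 and those frames are mapped individually. The layout this changeset establishes (shared_page's position is unchanged; the xenstore frame is assigned in the xc_hvm_build.c hunks below):

    /* Special frames at the top of guest memory (sketch; page_array and
     * nr_pages as in vl.c above): */
    xen_pfn_t buffered_io_pfn = page_array[nr_pages - 3]; /* buffered ioreq ring */
    xen_pfn_t xenstore_pfn    = page_array[nr_pages - 2]; /* xenstore page */
    xen_pfn_t shared_pfn      = page_array[nr_pages - 1]; /* shared_iopage */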
diff -r c1850c659e40 -r b16252dbcb1f tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c        Wed Aug 09 11:27:28 2006 +0100
+++ b/tools/libxc/xc_hvm_build.c        Wed Aug 09 11:29:06 2006 +0100
@@ -26,6 +26,7 @@
 #define E820_IO          16
 #define E820_SHARED_PAGE 17
 #define E820_XENSTORE    18
+#define E820_BUFFERED_IO 19
 
 #define E820_MAP_PAGE       0x00090000
 #define E820_MAP_NR_OFFSET  0x000001E8
@@ -96,7 +97,13 @@ static void build_e820map(void *e820_pag
     e820entry[nr_map].type = E820_RESERVED;
     nr_map++;
 
-#define STATIC_PAGES    2       /* for ioreq_t and store_mfn */
+#define STATIC_PAGES    3
+    /* Three statically allocated special pages:
+     * - buffered ioreq page
+     * - xenstore page
+     * - shared ioreq page
+     */
+
     /* Most of the ram goes here */
     e820entry[nr_map].addr = 0x100000;
     e820entry[nr_map].size = mem_size - 0x100000 - STATIC_PAGES * PAGE_SIZE;
@@ -104,6 +111,12 @@ static void build_e820map(void *e820_pag
     nr_map++;
 
     /* Statically allocated special pages */
+
+    /* For buffered IO requests */
+    e820entry[nr_map].addr = mem_size - 3 * PAGE_SIZE;
+    e820entry[nr_map].size = PAGE_SIZE;
+    e820entry[nr_map].type = E820_BUFFERED_IO;
+    nr_map++;
 
     /* For xenstore */
     e820entry[nr_map].addr = mem_size - 2 * PAGE_SIZE;
@@ -213,6 +226,9 @@ static int setup_guest(int xc_handle,
     unsigned long shared_page_frame = 0;
     shared_iopage_t *sp;
 
+    unsigned long ioreq_buffer_frame = 0;
+    void *ioreq_buffer_page;
+
     memset(&dsi, 0, sizeof(struct domain_setup_info));
 
     if ( (parseelfimage(image, image_size, &dsi)) != 0 )
@@ -302,6 +318,19 @@ static int setup_guest(int xc_handle,
         goto error_out;
     memset(sp, 0, PAGE_SIZE);
     munmap(sp, PAGE_SIZE);
+
+    /* clean the buffered IO requests page */
+    ioreq_buffer_frame = page_array[(v_end >> PAGE_SHIFT) - 3];
+    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+                                             PROT_READ | PROT_WRITE,
+                                             ioreq_buffer_frame);
+
+    if ( ioreq_buffer_page == NULL )
+        goto error_out;
+
+    memset(ioreq_buffer_page, 0, PAGE_SIZE);
+
+    munmap(ioreq_buffer_page, PAGE_SIZE);
 
     xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, (v_end >> PAGE_SHIFT) - 2);
     xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
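
Net effect of the build_e820map() hunks: the RAM entry shrinks by one more page, and the reclaimed page is typed E820_BUFFERED_IO just below the two existing special pages. The resulting tail of the map, as a sketch (the E820_SHARED_PAGE entry itself is not touched by this patch):

    /*  addr                    size                               type
     *  0x100000                mem_size - 0x100000 - 3*PAGE_SIZE  E820_RAM
     *  mem_size - 3*PAGE_SIZE  PAGE_SIZE                          E820_BUFFERED_IO
     *  mem_size - 2*PAGE_SIZE  PAGE_SIZE                          E820_XENSTORE
     *  mem_size - PAGE_SIZE    PAGE_SIZE                          E820_SHARED_PAGE
     */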
diff -r c1850c659e40 -r b16252dbcb1f xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Aug 09 11:29:06 2006 +0100
@@ -134,15 +134,28 @@ static void e820_map_io_shared_callback(
     }
 }
 
-void hvm_map_io_shared_page(struct vcpu *v)
-{
-    unsigned long mfn = INVALID_MFN;
+static void e820_map_buffered_io_callback(struct domain *d,
+                                          struct e820entry *e,
+                                          void *data)
+{
+    unsigned long *mfn = data;
+    if ( e->type == E820_BUFFERED_IO ) {
+        ASSERT(*mfn == INVALID_MFN);
+        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
+    }
+}
+
+void hvm_map_io_shared_pages(struct vcpu *v)
+{
+    unsigned long mfn;
     void *p;
     struct domain *d = v->domain;
 
-    if ( d->arch.hvm_domain.shared_page_va )
+    if ( d->arch.hvm_domain.shared_page_va ||
+         d->arch.hvm_domain.buffered_io_va )
         return;
 
+    mfn = INVALID_MFN;
     e820_foreach(d, e820_map_io_shared_callback, &mfn);
 
     if ( mfn == INVALID_MFN )
@@ -159,6 +172,14 @@ void hvm_map_io_shared_page(struct vcpu 
     }
 
     d->arch.hvm_domain.shared_page_va = (unsigned long)p;
+
+    mfn = INVALID_MFN;
+    e820_foreach(d, e820_map_buffered_io_callback, &mfn);
+    if ( mfn != INVALID_MFN ) {
+        p = map_domain_page_global(mfn);
+        if ( p )
+            d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
+    }
 }
 
 void hvm_create_event_channels(struct vcpu *v)
@@ -209,6 +230,8 @@ void hvm_setup_platform(struct domain* d
         spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
         hvm_vioapic_init(d);
     }
+
+    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
 
     init_timer(&platform->pl_time.periodic_tm.timer,
                pt_timer_fn, v, v->processor);
diff -r c1850c659e40 -r b16252dbcb1f xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/arch/x86/hvm/intercept.c      Wed Aug 09 11:29:06 2006 +0100
@@ -36,10 +36,24 @@ extern struct hvm_mmio_handler vioapic_m
 
 #define HVM_MMIO_HANDLER_NR 2
 
-struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
+static struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
 {
     &vlapic_mmio_handler,
     &vioapic_mmio_handler
+};
+
+struct hvm_buffered_io_range {
+    unsigned long start_addr;
+    unsigned long length;
+};
+
+#define HVM_BUFFERED_IO_RANGE_NR 1
+
+static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
+static struct hvm_buffered_io_range
+*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
+{
+    &buffered_stdvga_range
 };
 
 static inline void hvm_mmio_access(struct vcpu *v,
@@ -140,6 +154,56 @@ static inline void hvm_mmio_access(struc
     }
 }
 
+int hvm_buffered_io_intercept(ioreq_t *p)
+{
+    struct vcpu *v = current;
+    spinlock_t  *buffered_io_lock;
+    buffered_iopage_t *buffered_iopage =
+        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
+    unsigned long tmp_write_pointer = 0;
+    int i;
+
+    /* Never buffer reads: the guest is waiting on the result. */
+    if ( p->dir == IOREQ_READ )
+        return 0;
+
+    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
+        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
+             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
+                                     hvm_buffered_io_ranges[i]->length )
+            break;
+    }
+
+    if ( i == HVM_BUFFERED_IO_RANGE_NR )
+        return 0;
+
+    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
+    spin_lock(buffered_io_lock);
+
+    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
+         (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
+        /* The queue is full: send this ioreq through the normal path.
+         * NOTE: the unsigned subtraction above remains correct even
+         * after write_pointer overflows, so no extra wrap handling
+         * is needed.
+         */
+        spin_unlock(buffered_io_lock);
+        return 0;
+    }
+
+    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
+
+    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
+
+    /* Make the ioreq_t visible before advancing write_pointer. */
+    wmb();
+    buffered_iopage->write_pointer++;
+
+    spin_unlock(buffered_io_lock);
+
+    return 1;
+}
+
 int hvm_mmio_intercept(ioreq_t *p)
 {
     struct vcpu *v = current;
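
Two gating conditions decide eligibility in hvm_buffered_io_intercept() above: the request must be a write (a read blocks the guest until its result arrives, so it can never be deferred), and it must fall entirely inside a registered range; the only range registered is the legacy VGA window, 0xA0000 + 0x20000 = [0xA0000, 0xC0000). The range test, lifted into a hypothetical standalone helper:

    /* Hypothetical helper (the name is ours, not the patch's): does the
     * access [addr, addr + size) sit wholly inside the buffered stdvga
     * window? */
    static inline int in_buffered_vga_range(unsigned long addr,
                                            unsigned long size)
    {
        return addr >= 0xA0000 && addr + size - 1 < 0xA0000 + 0x20000;
    }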
diff -r c1850c659e40 -r b16252dbcb1f xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/arch/x86/hvm/platform.c       Wed Aug 09 11:29:06 2006 +0100
@@ -779,7 +779,7 @@ void send_mmio_req(
     } else
         p->u.data = value;
 
-    if (hvm_mmio_intercept(p)){
+    if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) {
         p->state = STATE_IORESP_READY;
         hvm_io_assist(v);
         return;
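
After this hunk, send_mmio_req() resolves a request in priority order; only requests claimed by neither intercept are forwarded to qemu-dm. Sketched:

    /* Dispatch order in send_mmio_req() after this change (sketch):
     *   1. hvm_mmio_intercept(p)        - vLAPIC/vIOAPIC, emulated in Xen
     *   2. hvm_buffered_io_intercept(p) - queued on the ring; the guest
     *                                     resumes without notifying qemu-dm
     *   3. otherwise                    - ordinary synchronous ioreq */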
diff -r c1850c659e40 -r b16252dbcb1f xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Aug 09 11:29:06 2006 +0100
@@ -809,6 +809,9 @@ static void svm_relinquish_guest_resourc
     if ( d->arch.hvm_domain.shared_page_va )
         unmap_domain_page_global(
             (void *)d->arch.hvm_domain.shared_page_va);
+
+    if ( d->arch.hvm_domain.buffered_io_va )
+        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
 
     shadow_direct_map_clean(d);
 }
diff -r c1850c659e40 -r b16252dbcb1f xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Aug 09 11:29:06 2006 +0100
@@ -150,6 +150,9 @@ static void vmx_relinquish_guest_resourc
     if ( d->arch.hvm_domain.shared_page_va )
         unmap_domain_page_global(
                (void *)d->arch.hvm_domain.shared_page_va);
+
+    if ( d->arch.hvm_domain.buffered_io_va )
+        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
 
     shadow_direct_map_clean(d);
 }
diff -r c1850c659e40 -r b16252dbcb1f xen/include/asm-x86/e820.h
--- a/xen/include/asm-x86/e820.h        Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/include/asm-x86/e820.h        Wed Aug 09 11:29:06 2006 +0100
@@ -12,6 +12,7 @@
 #define E820_IO          16
 #define E820_SHARED_PAGE 17
 #define E820_XENSTORE    18
+#define E820_BUFFERED_IO 19
 
 #define E820_MAP_PAGE        0x00090000
 #define E820_MAP_NR_OFFSET   0x000001E8
diff -r c1850c659e40 -r b16252dbcb1f xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/include/asm-x86/hvm/domain.h  Wed Aug 09 11:29:06 2006 +0100
@@ -33,6 +33,8 @@
 
 struct hvm_domain {
     unsigned long          shared_page_va;
+    unsigned long          buffered_io_va;
+    spinlock_t             buffered_io_lock;
     s64                    tsc_frequency;
     struct pl_time         pl_time;
 
diff -r c1850c659e40 -r b16252dbcb1f xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Aug 09 11:29:06 2006 +0100
@@ -78,7 +78,7 @@ hvm_disable(void)
 }
 
 void hvm_create_event_channels(struct vcpu *v);
-void hvm_map_io_shared_page(struct vcpu *v);
+void hvm_map_io_shared_pages(struct vcpu *v);
 
 static inline int
 hvm_initialize_guest_resources(struct vcpu *v)
@@ -87,7 +87,7 @@ hvm_initialize_guest_resources(struct vc
     if ( hvm_funcs.initialize_guest_resources )
         ret = hvm_funcs.initialize_guest_resources(v);
     if ( ret == 1 ) {
-        hvm_map_io_shared_page(v);
+        hvm_map_io_shared_pages(v);
         hvm_create_event_channels(v);
     }
     return ret;
diff -r c1850c659e40 -r b16252dbcb1f xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/include/asm-x86/hvm/support.h Wed Aug 09 11:29:06 2006 +0100
@@ -139,6 +139,7 @@ extern void hvm_setup_platform(struct do
 extern void hvm_setup_platform(struct domain* d);
 extern int hvm_mmio_intercept(ioreq_t *p);
 extern int hvm_io_intercept(ioreq_t *p, int type);
+extern int hvm_buffered_io_intercept(ioreq_t *p);
 extern void hvm_hooks_assist(struct vcpu *v);
 extern void hvm_print_line(struct vcpu *v, const char c);
 extern void hlt_timer_fn(void *data);
diff -r c1850c659e40 -r b16252dbcb1f xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h    Wed Aug 09 11:27:28 2006 +0100
+++ b/xen/include/public/hvm/ioreq.h    Wed Aug 09 11:29:06 2006 +0100
@@ -78,6 +78,14 @@ struct shared_iopage {
 };
 typedef struct shared_iopage shared_iopage_t;
 
+#define IOREQ_BUFFER_SLOT_NUM     80
+struct buffered_iopage {
+    unsigned long   read_pointer;
+    unsigned long   write_pointer;
+    ioreq_t         ioreq[IOREQ_BUFFER_SLOT_NUM];
+};            /* this structure must fit within a single page */
+typedef struct buffered_iopage buffered_iopage_t;
+
 #endif /* _IOREQ_H_ */
 
 /*
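
The comment on struct buffered_iopage requires the structure to fit within one page. A hypothetical compile-time guard (not present in this changeset) would make that constraint self-enforcing:

    /* Hypothetical guard: fails to compile if buffered_iopage_t ever
     * outgrows one page, assuming 4 KiB pages. */
    typedef char buffered_iopage_fits_in_one_page
        [(sizeof(buffered_iopage_t) <= 4096) ? 1 : -1];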
diff -r c1850c659e40 -r b16252dbcb1f tools/ioemu/patches/xen-support-buffered-ioreqs
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/ioemu/patches/xen-support-buffered-ioreqs   Wed Aug 09 11:29:06 2006 +0100
@@ -0,0 +1,150 @@
+Index: ioemu/vl.c
+===================================================================
+--- ioemu.orig/vl.c    2006-08-08 14:33:30.000000000 +0100
++++ ioemu/vl.c 2006-08-08 14:43:34.000000000 +0100
+@@ -5834,6 +5834,7 @@
+     unsigned long nr_pages;
+     xen_pfn_t *page_array;
+     extern void *shared_page;
++    extern void *buffered_io_page;
+ 
+     char qemu_dm_logfilename[64];
+ 
+@@ -6378,12 +6379,17 @@
+ 
+     phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
+                                          PROT_READ|PROT_WRITE, page_array,
+-                                         nr_pages - 1);
++                                         nr_pages - 3);
+     if (phys_ram_base == 0) {
+         fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
+         exit(-1);
+     }
+ 
++    /* Not yet supported on IA64. */
++    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
++                                       PROT_READ|PROT_WRITE,
++                                       page_array[nr_pages - 3]);
++
+     shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                        PROT_READ|PROT_WRITE,
+                                        page_array[nr_pages - 1]);
+Index: ioemu/target-i386-dm/helper2.c
+===================================================================
+--- ioemu.orig/target-i386-dm/helper2.c        2006-08-08 14:33:30.000000000 +0100
++++ ioemu/target-i386-dm/helper2.c     2006-08-09 10:03:40.558744653 +0100
+@@ -76,6 +76,10 @@
+ 
+ shared_iopage_t *shared_page = NULL;
+ 
++#define BUFFER_IO_MAX_DELAY  100
++buffered_iopage_t *buffered_io_page = NULL;
++QEMUTimer *buffered_io_timer;
++
+ /* the evtchn fd for polling */
+ int xce_handle = -1;
+ 
+@@ -419,36 +423,68 @@
+     req->u.data = tmp1;
+ }
+ 
++void __handle_ioreq(CPUState *env, ioreq_t *req)
++{
++    if (!req->pdata_valid && req->dir == IOREQ_WRITE && req->size != 4)
++      req->u.data &= (1UL << (8 * req->size)) - 1;
++
++    switch (req->type) {
++    case IOREQ_TYPE_PIO:
++        cpu_ioreq_pio(env, req);
++        break;
++    case IOREQ_TYPE_COPY:
++        cpu_ioreq_move(env, req);
++        break;
++    case IOREQ_TYPE_AND:
++        cpu_ioreq_and(env, req);
++        break;
++    case IOREQ_TYPE_OR:
++        cpu_ioreq_or(env, req);
++        break;
++    case IOREQ_TYPE_XOR:
++        cpu_ioreq_xor(env, req);
++        break;
++    default:
++        hw_error("Invalid ioreq type 0x%x\n", req->type);
++    }
++}
++
++void __handle_buffered_iopage(CPUState *env)
++{
++    ioreq_t *req = NULL;
++
++    if (!buffered_io_page)
++        return;
++
++    while (buffered_io_page->read_pointer !=
++           buffered_io_page->write_pointer) {
++        req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
++                                     IOREQ_BUFFER_SLOT_NUM];
++
++        __handle_ioreq(env, req);
++
++        mb();
++        buffered_io_page->read_pointer++;
++    }
++}
++
++void handle_buffered_io(void *opaque)
++{
++    CPUState *env = opaque;
++
++    __handle_buffered_iopage(env);
++    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
++                 qemu_get_clock(rt_clock));
++}
++
+ void cpu_handle_ioreq(void *opaque)
+ {
+     CPUState *env = opaque;
+     ioreq_t *req = cpu_get_ioreq();
+ 
++    handle_buffered_io(env);
+     if (req) {
+-        if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
+-            if (req->size != 4)
+-                req->u.data &= (1UL << (8 * req->size))-1;
+-        }
+-
+-        switch (req->type) {
+-        case IOREQ_TYPE_PIO:
+-            cpu_ioreq_pio(env, req);
+-            break;
+-        case IOREQ_TYPE_COPY:
+-            cpu_ioreq_move(env, req);
+-            break;
+-        case IOREQ_TYPE_AND:
+-            cpu_ioreq_and(env, req);
+-            break;
+-        case IOREQ_TYPE_OR:
+-            cpu_ioreq_or(env, req);
+-            break;
+-        case IOREQ_TYPE_XOR:
+-            cpu_ioreq_xor(env, req);
+-            break;
+-        default:
+-            hw_error("Invalid ioreq type 0x%x\n", req->type);
+-        }
++        __handle_ioreq(env, req);
+ 
+         /* No state change if state = STATE_IORESP_HOOK */
+         if (req->state == STATE_IOREQ_INPROCESS) {
+@@ -466,6 +502,10 @@
+     CPUState *env = cpu_single_env;
+     int evtchn_fd = xc_evtchn_fd(xce_handle);
+ 
++    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
++                                     cpu_single_env);
++    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
++
+     qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
+ 
+     env->send_event = 0;



 

