
[Xen-changelog] [xen-unstable] [qemu patches] Update patches for changeset 10993:dc7b56b8cfb5.



# HG changeset patch
# User chris@xxxxxxxxxxxxxxxxxxxxxxxx
# Node ID 01043d5438786a7e023ca9236999d0f5bf919e9d
# Parent  dc7b56b8cfb589999d4ac339da80dac4591580bc
[qemu patches] Update patches for changeset 10993:dc7b56b8cfb5.

Signed-off-by: Christian Limpach <Christian.Limpach@xxxxxxxxxxxxx>
---
 tools/ioemu/patches/series                      |    1 
 tools/ioemu/patches/xen-support-buffered-ioreqs |   77 ++++++++++++++++++++++++
 2 files changed, 78 insertions(+)

diff -r dc7b56b8cfb5 -r 01043d543878 tools/ioemu/patches/series
--- a/tools/ioemu/patches/series        Wed Aug 09 11:25:33 2006 +0100
+++ b/tools/ioemu/patches/series        Wed Aug 09 11:25:55 2006 +0100
@@ -39,3 +39,4 @@ qemu-allow-disable-sdl
 qemu-allow-disable-sdl
 qemu-fix-memset-args
 qemu-fix-write-to-disk-synchronous
+xen-support-buffered-ioreqs
diff -r dc7b56b8cfb5 -r 01043d543878 tools/ioemu/patches/xen-support-buffered-ioreqs
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/ioemu/patches/xen-support-buffered-ioreqs   Wed Aug 09 11:25:55 2006 +0100
@@ -0,0 +1,150 @@
+Index: ioemu/vl.c
+===================================================================
+--- ioemu.orig/vl.c    2006-08-08 14:33:30.000000000 +0100
++++ ioemu/vl.c 2006-08-08 14:43:34.000000000 +0100
+@@ -5834,6 +5834,7 @@
+     unsigned long nr_pages;
+     xen_pfn_t *page_array;
+     extern void *shared_page;
++    extern void *buffered_io_page;
+ 
+     char qemu_dm_logfilename[64];
+ 
+@@ -6378,12 +6379,17 @@
+ 
+     phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
+                                          PROT_READ|PROT_WRITE, page_array,
+-                                         nr_pages - 1);
++                                         nr_pages - 3);
+     if (phys_ram_base == 0) {
+         fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
+         exit(-1);
+     }
+ 
++    /* not yet added for IA64 */
++    buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
++                                       PROT_READ|PROT_WRITE,
++                                       page_array[nr_pages - 3]);
++
+     shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+                                        PROT_READ|PROT_WRITE,
+                                        page_array[nr_pages - 1]);
+Index: ioemu/target-i386-dm/helper2.c
+===================================================================
+--- ioemu.orig/target-i386-dm/helper2.c        2006-08-08 14:33:30.000000000 +0100
++++ ioemu/target-i386-dm/helper2.c     2006-08-09 10:03:40.558744653 +0100
+@@ -76,6 +76,10 @@
+ 
+ shared_iopage_t *shared_page = NULL;
+ 
++#define BUFFER_IO_MAX_DELAY  100
++buffered_iopage_t *buffered_io_page = NULL;
++QEMUTimer *buffered_io_timer;
++
+ /* the evtchn fd for polling */
+ int xce_handle = -1;
+ 
+@@ -419,36 +423,68 @@
+     req->u.data = tmp1;
+ }
+ 
++void __handle_ioreq(CPUState *env, ioreq_t *req)
++{
++    if (!req->pdata_valid && req->dir == IOREQ_WRITE && req->size != 4)
++      req->u.data &= (1UL << (8 * req->size)) - 1;
++
++    switch (req->type) {
++    case IOREQ_TYPE_PIO:
++        cpu_ioreq_pio(env, req);
++        break;
++    case IOREQ_TYPE_COPY:
++        cpu_ioreq_move(env, req);
++        break;
++    case IOREQ_TYPE_AND:
++        cpu_ioreq_and(env, req);
++        break;
++    case IOREQ_TYPE_OR:
++        cpu_ioreq_or(env, req);
++        break;
++    case IOREQ_TYPE_XOR:
++        cpu_ioreq_xor(env, req);
++        break;
++    default:
++        hw_error("Invalid ioreq type 0x%x\n", req->type);
++    }
++}
++
++void __handle_buffered_iopage(CPUState *env)
++{
++    ioreq_t *req = NULL;
++
++    if (!buffered_io_page)
++        return;
++
++    while (buffered_io_page->read_pointer !=
++           buffered_io_page->write_pointer) {
++        req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
++                                     IOREQ_BUFFER_SLOT_NUM];
++
++        __handle_ioreq(env, req);
++
++        mb();
++        buffered_io_page->read_pointer++;
++    }
++}
++
++void handle_buffered_io(void *opaque)
++{
++    CPUState *env = opaque;
++
++    __handle_buffered_iopage(env);
++    qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
++                 qemu_get_clock(rt_clock));
++}
++
+ void cpu_handle_ioreq(void *opaque)
+ {
+     CPUState *env = opaque;
+     ioreq_t *req = cpu_get_ioreq();
+ 
++    handle_buffered_io(env);
+     if (req) {
+-        if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
+-            if (req->size != 4)
+-                req->u.data &= (1UL << (8 * req->size))-1;
+-        }
+-
+-        switch (req->type) {
+-        case IOREQ_TYPE_PIO:
+-            cpu_ioreq_pio(env, req);
+-            break;
+-        case IOREQ_TYPE_COPY:
+-            cpu_ioreq_move(env, req);
+-            break;
+-        case IOREQ_TYPE_AND:
+-            cpu_ioreq_and(env, req);
+-            break;
+-        case IOREQ_TYPE_OR:
+-            cpu_ioreq_or(env, req);
+-            break;
+-        case IOREQ_TYPE_XOR:
+-            cpu_ioreq_xor(env, req);
+-            break;
+-        default:
+-            hw_error("Invalid ioreq type 0x%x\n", req->type);
+-        }
++        __handle_ioreq(env, req);
+ 
+         /* No state change if state = STATE_IORESP_HOOK */
+         if (req->state == STATE_IOREQ_INPROCESS) {
+@@ -466,6 +502,10 @@
+     CPUState *env = cpu_single_env;
+     int evtchn_fd = xc_evtchn_fd(xce_handle);
+ 
++    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
++                                     cpu_single_env);
++    qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
++
+     qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
+ 
+     env->send_event = 0;
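
Editor's note: the core of the helper2.c hunk is a lock-free single-producer/single-consumer
ring living in the newly mapped buffered_io_page. Below is a simplified sketch of the consumer
side, modelled on the __handle_buffered_iopage() function added above. buffered_iopage_t,
ioreq_t and IOREQ_BUFFER_SLOT_NUM are the types and constants used in the patch (from the Xen
public HVM ioreq header); handle_one_ioreq() is a hypothetical stand-in for the __handle_ioreq()
dispatcher, not a real function in this tree.

    /*
     * Simplified consumer loop for the buffered-ioreq ring.  Xen (the
     * producer) only advances write_pointer; qemu-dm (the consumer) only
     * advances read_pointer, so no lock is needed -- just a memory
     * barrier before the slot is handed back to the producer.
     */
    static void drain_buffered_iopage(CPUState *env, buffered_iopage_t *page)
    {
        if (page == NULL)       /* e.g. IA64, where the page is not mapped yet */
            return;

        while (page->read_pointer != page->write_pointer) {
            ioreq_t *req =
                &page->ioreq[page->read_pointer % IOREQ_BUFFER_SLOT_NUM];

            handle_one_ioreq(env, req);    /* stand-in for __handle_ioreq() */

            mb();                          /* finish with the slot before the
                                              producer may reuse it */
            page->read_pointer++;
        }
    }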

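Buffered requests are not necessarily accompanied by an event-channel notification, so the patch
also installs a self-rearming rt_clock timer that drains the ring at least every
BUFFER_IO_MAX_DELAY. The sketch below restates that setup using the same QEMU timer calls as the
hunk (qemu_new_timer, qemu_mod_timer, qemu_get_clock); drain_buffered_iopage() refers to the
sketch above and the millisecond interpretation of BUFFER_IO_MAX_DELAY is an assumption based on
rt_clock usage in this tree.

    #define BUFFER_IO_MAX_DELAY  100       /* as defined in the patch */

    static QEMUTimer *flush_timer;

    /* timer callback: drain the ring, then re-arm relative to "now" */
    static void flush_buffered_io(void *opaque)
    {
        CPUState *env = opaque;

        drain_buffered_iopage(env, buffered_io_page);   /* see sketch above */

        qemu_mod_timer(flush_timer,
                       qemu_get_clock(rt_clock) + BUFFER_IO_MAX_DELAY);
    }

    /* one-time setup, mirroring the main-loop hunk at the end of the patch */
    static void start_buffered_io_flush(CPUState *env)
    {
        flush_timer = qemu_new_timer(rt_clock, flush_buffered_io, env);
        qemu_mod_timer(flush_timer, qemu_get_clock(rt_clock));
    }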