
[Xen-devel] [PATCH] Fix stdvga performance for 32bit ops



Fix a bug in the stdvga code where 32-bit operations were not
handled properly.  buf_ioreq_t can now store a full 32 bits of
data.  Because this increases its size to 8 bytes, only 510
elements fit in the buffered_iopage (down from 672).

Signed-off-by: Robert Phillips <rphillips@xxxxxxxxxxxxxxx>
Signed-off-by: Ben Guthro <bguthro@xxxxxxxxxxxxxxx>
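
For illustration only (not part of the patch), here is a minimal
standalone sketch of the size arithmetic behind the new
IOREQ_BUFFER_SLOT_NUM value.  It assumes the field layout from the
updated public header below; the PAGE_SIZE_SKETCH constant and the
main() harness are made up for the example.

/* Standalone sketch, not part of the patch: prints the new slot and
 * ring sizes and checks that 510 eight-byte slots plus the two ring
 * pointers still fit in a 4K buffered_iopage. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct buf_ioreq {
    uint8_t  type;    /* I/O type */
    uint8_t  dir:1;   /* 1=read, 0=write */
    uint8_t  size:2;  /* 0=>1, 1=>2, 2=>4, 3=>8 */
    uint32_t addr:20; /* physical address */
    uint32_t data;    /* data (now a full 32 bits) */
};

#define IOREQ_BUFFER_SLOT_NUM 510
#define PAGE_SIZE_SKETCH      4096

struct buffered_iopage {
    volatile unsigned int read_pointer;
    volatile unsigned int write_pointer;
    struct buf_ioreq buf_ioreq[IOREQ_BUFFER_SLOT_NUM];
};

int main(void)
{
    printf("sizeof(buf_ioreq)       = %zu\n", sizeof(struct buf_ioreq));
    printf("sizeof(buffered_iopage) = %zu\n", sizeof(struct buffered_iopage));
    assert(sizeof(struct buffered_iopage) <= PAGE_SIZE_SKETCH);
    return 0;
}
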
diff -r 15c6e8698fda tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Wed Oct 31 15:07:54 2007 -0400
+++ b/tools/ioemu/target-i386-dm/helper2.c      Wed Oct 31 15:08:02 2007 -0400
@@ -554,20 +554,17 @@ void __handle_buffered_iopage(CPUState *
                                       IOREQ_BUFFER_SLOT_NUM];
         req.size = 1UL << buf_req->size;
         req.count = 1;
+        req.addr = buf_req->addr;
         req.data = buf_req->data;
         req.state = STATE_IOREQ_READY;
         req.dir  = buf_req->dir;
         req.type = buf_req->type;
         qw = req.size == 8;
         if (qw) {
-            req.data |= ((uint64_t)buf_req->addr) << 16;
             buf_req = &buffered_io_page->buf_ioreq[(buffered_io_page->read_pointer+1) %
                                                IOREQ_BUFFER_SLOT_NUM];
             req.data |= ((uint64_t)buf_req->data) << 32;
-            req.data |= ((uint64_t)buf_req->addr) << 48;
-        }
-        else
-            req.addr = buf_req->addr;
+        }
 
         __handle_ioreq(env, &req);
 
diff -r 15c6e8698fda xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Wed Oct 31 15:07:54 2007 -0400
+++ b/xen/arch/x86/hvm/intercept.c      Wed Oct 31 15:08:02 2007 -0400
@@ -163,8 +163,11 @@ int hvm_buffered_io_send(ioreq_t *p)
     BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
 
     /* Return 0 for the cases we can't deal with. */
-    if (p->addr > 0xffffful || p->data_is_ptr || p->df || p->count != 1)
-        return 0;
+    if (p->addr > 0xffffful || p->data_is_ptr || p->df || p->count != 1) {
+        gdprintk(XENLOG_DEBUG, "slow ioreq.  type:%d size:%ld addr:0x%08lx dir:%d ptr:%d df:%d count:%ld\n",
+                 p->type, p->size, p->addr, !!p->dir, !!p->data_is_ptr, !!p->df, p->count);
+        return 0;
+    }
 
     bp.type = p->type;
     bp.dir  = p->dir;
@@ -181,7 +184,6 @@ int hvm_buffered_io_send(ioreq_t *p)
     case 8:
         bp.size = 3;
         qw = 1;
-        gdprintk(XENLOG_INFO, "quadword ioreq type:%d data:%ld\n", p->type, p->data);
         break;
     default:
         gdprintk(XENLOG_WARNING, "unexpected ioreq size:%ld\n", p->size);
@@ -189,7 +191,7 @@ int hvm_buffered_io_send(ioreq_t *p)
     }
     
     bp.data = p->data;
-    bp.addr = qw ? ((p->data >> 16) & 0xfffful) : (p->addr & 0xffffful);
+    bp.addr = p->addr;
     
     spin_lock(&iorp->lock);
 
@@ -205,7 +207,6 @@ int hvm_buffered_io_send(ioreq_t *p)
     
     if (qw) {
         bp.data = p->data >> 32;
-        bp.addr = (p->data >> 48) & 0xfffful;
         memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM],
                &bp, sizeof(bp));
     }
diff -r 15c6e8698fda xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Wed Oct 31 15:07:54 2007 -0400
+++ b/xen/arch/x86/hvm/stdvga.c Wed Oct 31 15:08:02 2007 -0400
@@ -273,9 +273,12 @@ int stdvga_intercept_pio(ioreq_t *p)
     }
 
     spin_lock(&s->lock);
+
     if ( p->dir == IOREQ_READ ) {
         if (p->size != 1)
             gdprintk(XENLOG_WARNING, "unexpected io size:%d\n", (int)p->size);
+        if (p->data_is_ptr)
+            gdprintk(XENLOG_WARNING, "unexpected data_is_ptr\n");
         if (!(p->addr == 0x3c5 && s->sr_index >= sizeof(sr_mask)) &&
             !(p->addr == 0x3cf && s->gr_index >= sizeof(gr_mask)))
         {
@@ -591,6 +594,9 @@ int stdvga_intercept_mmio(ioreq_t *p)
             s->cache = 0;
         }
     }
+    else
+        buf = p->dir == IOREQ_WRITE;
+    
     if (buf && hvm_buffered_io_send(p)) {
         UPDATE_STATS(p->dir == IOREQ_READ ? s->stats.nr_mmio_buffered_rd++ : s->stats.nr_mmio_buffered_wr++);
         spin_unlock(&s->lock);
diff -r 15c6e8698fda xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h    Wed Oct 31 15:07:54 2007 -0400
+++ b/xen/include/public/hvm/ioreq.h    Wed Oct 31 15:07:57 2007 -0400
@@ -82,13 +82,13 @@ struct buf_ioreq {
 struct buf_ioreq {
     uint8_t  type;   /*  I/O type                    */
     uint8_t  dir:1;  /*  1=read, 0=write             */
-    uint8_t  size:2; /*  0=>1, 1=>2, 3=>8. If 8 then use two contig buf_ioreqs */
-    uint32_t addr:20; /*  physical address or high-order data */
-    uint16_t data;   /*  (low order) data            */
+    uint8_t  size:2; /*  0=>1, 1=>2, 2=>4, 3=>8. If 8 then use two contig buf_ioreqs */
+    uint32_t addr:20;/*  physical address            */
+    uint32_t data;   /*  data                        */
 };
 typedef struct buf_ioreq buf_ioreq_t;
 
-#define IOREQ_BUFFER_SLOT_NUM     672
+#define IOREQ_BUFFER_SLOT_NUM     510
 struct buffered_iopage {
     volatile unsigned int read_pointer;
     volatile unsigned int write_pointer;
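
As background (also not part of the patch), a rough sketch of the
two-slot encoding for 8-byte requests that the patched
hvm_buffered_io_send() and __handle_buffered_iopage() now use: the low
32 bits of data go in the first slot, the high 32 bits in the second,
and the address travels only in the first slot.  The slot_sketch type
and the helper names are made up for the example.

/* Standalone sketch, not part of the patch. */
#include <assert.h>
#include <stdint.h>

struct slot_sketch {        /* simplified stand-in for buf_ioreq */
    uint32_t addr;          /* 20-bit physical address in the real struct */
    uint32_t data;          /* 32 bits of data per slot */
};

/* Split a quadword, roughly as hvm_buffered_io_send() does after the patch. */
static void encode_qword(struct slot_sketch slot[2], uint32_t addr, uint64_t data)
{
    slot[0].addr = addr;
    slot[0].data = (uint32_t)data;          /* low 32 bits  */
    slot[1].addr = 0;                       /* second slot's addr is unused */
    slot[1].data = (uint32_t)(data >> 32);  /* high 32 bits */
}

/* Reassemble, roughly as __handle_buffered_iopage() does after the patch. */
static uint64_t decode_qword(const struct slot_sketch slot[2])
{
    return (uint64_t)slot[0].data | ((uint64_t)slot[1].data << 32);
}

int main(void)
{
    struct slot_sketch slots[2];

    encode_qword(slots, 0xb8000, 0x1122334455667788ULL);
    assert(decode_qword(slots) == 0x1122334455667788ULL);
    return 0;
}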