[Xen-changelog] [xen-unstable] [HVM] Fix PIC IO intercept: addresses are physical, not virtual.

# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID edcd3a25a2bdae510d91edc7f1bfdb54f429c762
# Parent  7da100019e00018346877608d9110f1bde8817d5
[HVM] Fix PIC IO intercept: addresses are physical, not virtual.

Signed-off-by: Xiaohui Xin <xiaohui.xin@xxxxxxxxx>
Signed-off-by: Xiaowei Yang <xiaowei.yang@xxxxxxxxx>
Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
---
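The core of the fix: when send_pio_req() builds an ioreq with pdata_valid set, it translates the guest virtual address into a guest physical address via shadow_gva_to_gpa() before the request reaches any intercept handler, so p->u.pdata is already physical by the time the PIC/ELCR intercepts see it. Copying through hvm_copy_{from,to}_guest_virt() would re-walk the guest page tables on an address that is no longer virtual. A minimal sketch of the corrected pattern (hypothetical handler name, mirroring the i8259.c changes below):

    /* Sketch only -- illustrates the pattern this patch installs. */
    static int example_portio_intercept(ioreq_t *p)
    {
        uint32_t data = 0;

        if ( p->dir == IOREQ_WRITE )
        {
            if ( p->pdata_valid )
                /* p->u.pdata already holds a guest *physical* address,
                 * courtesy of shadow_gva_to_gpa() in send_pio_req(). */
                (void)hvm_copy_from_guest_phys(
                    &data, (unsigned long)p->u.pdata, p->size);
            else
                data = p->u.data;
            /* ... emulate the device write using 'data' ... */
        }
        return 1;
    }
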
 xen/arch/x86/hvm/i8259.c    |   45 +++++++++++++++++++++-----------------------
 xen/arch/x86/hvm/platform.c |   20 ++++++++++---------
 2 files changed, 33 insertions(+), 32 deletions(-)

diff -r 7da100019e00 -r edcd3a25a2bd xen/arch/x86/hvm/i8259.c
--- a/xen/arch/x86/hvm/i8259.c  Fri Oct 13 18:04:33 2006 +0100
+++ b/xen/arch/x86/hvm/i8259.c  Sun Oct 15 09:15:51 2006 +0100
@@ -498,19 +498,19 @@ void pic_init(struct hvm_virpic *s, void
 
 static int intercept_pic_io(ioreq_t *p)
 {
-    struct hvm_virpic  *pic;
-    struct vcpu *v = current;
+    struct hvm_virpic *pic;
     uint32_t data;
     unsigned long flags;
-    
-    if ( p->size != 1 || p->count != 1) {
+
+    if ( p->size != 1 || p->count != 1 ) {
         printk("PIC_IO wrong access size %d!\n", (int)p->size);
         return 1;
     }
-    pic = &v->domain->arch.hvm_domain.vpic;
-    if ( p->dir == 0 ) {
-        if (p->pdata_valid) 
-            (void)hvm_copy_from_guest_virt(
+
+    pic = &current->domain->arch.hvm_domain.vpic;
+    if ( p->dir == IOREQ_WRITE ) {
+        if ( p->pdata_valid )
+            (void)hvm_copy_from_guest_phys(
                 &data, (unsigned long)p->u.pdata, p->size);
         else
             data = p->u.data;
@@ -524,10 +524,10 @@ static int intercept_pic_io(ioreq_t *p)
         data = pic_ioport_read(
             (void*)&pic->pics[p->addr>>7], (uint32_t) p->addr);
         spin_unlock_irqrestore(&pic->lock, flags);
-        if (p->pdata_valid) 
-            (void)hvm_copy_to_guest_virt(
+        if ( p->pdata_valid )
+            (void)hvm_copy_to_guest_phys(
                 (unsigned long)p->u.pdata, &data, p->size);
-        else 
+        else
             p->u.data = (u64)data;
     }
     return 1;
@@ -535,42 +535,41 @@ static int intercept_pic_io(ioreq_t *p)
 
 static int intercept_elcr_io(ioreq_t *p)
 {
-    struct hvm_virpic  *s;
-    struct vcpu *v = current;
+    struct hvm_virpic *s;
     uint32_t data;
     unsigned long flags;
-    
+
     if ( p->size != 1 || p->count != 1 ) {
         printk("PIC_IO wrong access size %d!\n", (int)p->size);
         return 1;
     }
 
-    s = &v->domain->arch.hvm_domain.vpic;
-    if ( p->dir == 0 ) {
-        if (p->pdata_valid) 
-            (void)hvm_copy_from_guest_virt(
+    s = &current->domain->arch.hvm_domain.vpic;
+    if ( p->dir == IOREQ_WRITE ) {
+        if ( p->pdata_valid )
+            (void)hvm_copy_from_guest_phys(
                 &data, (unsigned long)p->u.pdata, p->size);
         else
             data = p->u.data;
         spin_lock_irqsave(&s->lock, flags);
         elcr_ioport_write((void*)&s->pics[p->addr&1],
                 (uint32_t) p->addr, (uint32_t)( data & 0xff));
-        get_sp(current->domain)->sp_global.pic_elcr = 
+        get_sp(current->domain)->sp_global.pic_elcr =
             s->pics[0].elcr | ((u16)s->pics[1].elcr << 8);
         spin_unlock_irqrestore(&s->lock, flags);
     }
     else {
         data = (u64) elcr_ioport_read(
                 (void*)&s->pics[p->addr&1], (uint32_t) p->addr);
-        if (p->pdata_valid) 
-            (void)hvm_copy_to_guest_virt(
+        if ( p->pdata_valid )
+            (void)hvm_copy_to_guest_phys(
                 (unsigned long)p->u.pdata, &data, p->size);
-        else 
+        else
             p->u.data = (u64)data;
-
     }
     return 1;
 }
+
 void register_pic_io_hook (void)
 {
     register_portio_handler(0x20, 2, intercept_pic_io);
diff -r 7da100019e00 -r edcd3a25a2bd xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Fri Oct 13 18:04:33 2006 +0100
+++ b/xen/arch/x86/hvm/platform.c       Sun Oct 15 09:15:51 2006 +0100
@@ -730,13 +730,13 @@ void send_pio_req(struct cpu_user_regs *
     vcpu_iodata_t *vio;
     ioreq_t *p;
 
-    if (size == 0 || count == 0) {
+    if ( size == 0 || count == 0 ) {
         printf("null pio request? port %lx, count %lx, size %d, value %lx, dir 
%d, pvalid %d.\n",
                port, count, size, value, dir, pvalid);
     }
 
     vio = get_vio(v->domain, v->vcpu_id);
-    if (vio == NULL) {
+    if ( vio == NULL ) {
         printk("bad shared page: %lx\n", (unsigned long) vio);
         domain_crash_synchronous();
     }
@@ -745,6 +745,7 @@ void send_pio_req(struct cpu_user_regs *
     if ( p->state != STATE_INVALID )
         printk("WARNING: send pio with something already pending (%d)?\n",
                p->state);
+
     p->dir = dir;
     p->pdata_valid = pvalid;
 
@@ -752,19 +753,20 @@ void send_pio_req(struct cpu_user_regs *
     p->size = size;
     p->addr = port;
     p->count = count;
-    p->df = regs->eflags & EF_DF ? 1 : 0;
+    p->df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
 
     p->io_count++;
 
-    if (pvalid) {
-        if (hvm_paging_enabled(current))
-            p->u.data = shadow_gva_to_gpa(current, value);
+    if ( pvalid )   /* get physical address of data */
+    {
+        if ( hvm_paging_enabled(current) )
+            p->u.pdata = (void *)shadow_gva_to_gpa(current, value);
         else
-            p->u.pdata = (void *) value; /* guest VA == guest PA */
-    } else
+            p->u.pdata = (void *)value; /* guest VA == guest PA */
+    } else if ( dir == IOREQ_WRITE )
         p->u.data = value;
 
-    if (hvm_portio_intercept(p)) {
+    if ( hvm_portio_intercept(p) ) {
         p->state = STATE_IORESP_READY;
         hvm_io_assist(v);
         return;

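For reference, the platform.c hunk also tightens how the ioreq union is filled: u.pdata (not u.data) receives the translated guest physical address when pvalid is set, and u.data is written only for writes, so a read request no longer clobbers the pointer. A rough sketch of the fields involved (illustrative only; the authoritative layout is in Xen's public HVM ioreq definition):

    /* Sketch of the ioreq fields this patch manipulates; field set
     * trimmed for illustration. */
    struct ioreq_sketch {
        uint64_t addr;        /* PIO port (or MMIO physical address) */
        uint64_t size;        /* bytes per access */
        uint64_t count;       /* number of accesses */
        uint8_t  dir;         /* IOREQ_READ or IOREQ_WRITE */
        uint8_t  pdata_valid; /* if set, u.pdata is a guest physical address */
        union {
            uint64_t data;    /* immediate value (pdata_valid == 0) */
            void    *pdata;   /* guest physical address of the data buffer */
        } u;
    };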