
[Xen-changelog] [xen-unstable] [IA64] vti save-restore: hvm domain io page clean up.



# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1194456669 25200
# Node ID 91575bb23d073515a50e7cad04b5367316c08f73
# Parent  74b40a9f4c0a27217f093f4172ef68d783644fbb
[IA64] vti save-restore: hvm domain io page clean up.

- Clean up the set_hvm_param hypercall.
- Increment the reference counts of the io pages while Xen maps them.
- Make buffered pio SMP safe; it previously was not.
- Clean up the get_vio() parameters.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/vmx/mmio.c            |   27 ++++---
 xen/arch/ia64/vmx/vmx_hypercall.c   |   30 +++++++-
 xen/arch/ia64/vmx/vmx_init.c        |  130 ++++++++++++++++++++++++++++--------
 xen/arch/ia64/vmx/vmx_support.c     |    4 -
 xen/include/asm-ia64/vmx.h          |    8 +-
 xen/include/asm-ia64/vmx_platform.h |   25 ++++--
 6 files changed, 174 insertions(+), 50 deletions(-)
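
In outline, the patch replaces the three bare va fields with a small
per-page descriptor. A condensed sketch of the new lifecycle, with all
names taken from the hunks below:

    /* Each io page now carries its own lock and a page reference
     * instead of a bare virtual address. */
    struct vmx_ioreq_page {
        spinlock_t        lock;   /* serialises va/page on SMP */
        struct page_info *page;   /* pinned via get_page() */
        void             *va;     /* NULL until the guest pfn is set */
    };

    /* Lifecycle: vmx_setup_platform() zeroes each descriptor and
     * pauses the domain; the HVMOP_set_param hypercall maps the guest
     * page through vmx_set_ioreq_page() and unpauses; finally,
     * vmx_destroy_ioreq_page() drops the page reference at domain
     * teardown. */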

diff -r 74b40a9f4c0a -r 91575bb23d07 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/arch/ia64/vmx/mmio.c  Wed Nov 07 10:31:09 2007 -0700
@@ -56,7 +56,7 @@ static int hvm_buffered_io_intercept(ior
 {
     struct vcpu *v = current;
     buffered_iopage_t *pg =
-        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
+        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buf_ioreq.va);
     buf_ioreq_t bp;
     int i, qw = 0;
 
@@ -101,7 +101,7 @@ static int hvm_buffered_io_intercept(ior
     bp.data = p->data;
     bp.addr = p->addr;
 
-    spin_lock(&v->domain->arch.hvm_domain.buffered_io_lock);
+    spin_lock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
 
     if (pg->write_pointer - pg->read_pointer >= IOREQ_BUFFER_SLOT_NUM - qw) {
         /* the queue is full.
@@ -109,7 +109,7 @@ static int hvm_buffered_io_intercept(ior
          * NOTE: The unsigned arithmetic handles write_pointer
          * overflow correctly.
          */
-        spin_unlock(&v->domain->arch.hvm_domain.buffered_io_lock);
+        spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
         return 0;
     }
 
@@ -126,7 +126,7 @@ static int hvm_buffered_io_intercept(ior
     wmb();
     pg->write_pointer += qw ? 2 : 1;
 
-    spin_unlock(&v->domain->arch.hvm_domain.buffered_io_lock);
+    spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
 
     return 1;
 }
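
The "NOTE" in the hunk above relies on C unsigned arithmetic: the
difference write_pointer - read_pointer yields the number of occupied
slots even after write_pointer wraps, so the full-queue check needs no
special overflow handling. A standalone sketch with illustrative
values only:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Illustrative values: the writer has wrapped past
         * UINT32_MAX after filling 8 slots; the reader has not. */
        uint32_t read_pointer  = UINT32_MAX - 2;
        uint32_t write_pointer = 5;

        /* Modular subtraction still counts the occupied slots,
         * exactly as in hvm_buffered_io_intercept() above. */
        assert(write_pointer - read_pointer == 8);
        return 0;
    }
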
@@ -137,7 +137,7 @@ static void low_mmio_access(VCPU *vcpu, 
     vcpu_iodata_t *vio;
     ioreq_t *p;
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_vio(v);
     if (!vio)
         panic_domain(NULL, "bad shared page");
 
@@ -174,7 +174,8 @@ static int vmx_ide_pio_intercept(ioreq_t
 static int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
 {
     struct buffered_piopage *pio_page =
-        (void *)(current->domain->arch.hvm_domain.buffered_pio_va);
+        (void *)(current->domain->arch.hvm_domain.buf_pioreq.va);
+    spinlock_t *pio_lock;
     struct pio_buffer *piobuf;
     uint32_t pointer, page_offset;
 
@@ -188,14 +189,17 @@ static int vmx_ide_pio_intercept(ioreq_t
     if (p->size != 2 && p->size != 4)
         return 0;
 
+    pio_lock = &current->domain->arch.hvm_domain.buf_pioreq.lock;
+    spin_lock(pio_lock);
+
     pointer = piobuf->pointer;
     page_offset = piobuf->page_offset;
 
     /* sanity check */
     if (page_offset + pointer < offsetof(struct buffered_piopage, buffer))
-        return 0;
+        goto unlock_out;
     if (page_offset + piobuf->data_end > PAGE_SIZE)
-        return 0;
+        goto unlock_out;
 
     if (pointer + p->size < piobuf->data_end) {
         uint8_t *bufp = (uint8_t *)pio_page + page_offset + pointer;
@@ -213,10 +217,15 @@ static int vmx_ide_pio_intercept(ioreq_t
             }
         }
         piobuf->pointer += p->size;
+        spin_unlock(pio_lock);
+
         p->state = STATE_IORESP_READY;
         vmx_io_assist(current);
         return 1;
     }
+
+ unlock_out:
+    spin_unlock(pio_lock);
     return 0;
 }
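
The locking added here is the "buffered pio was not SMP safe"
changelog item. One hypothetical interleaving it closes (an
illustration, not code from the patch): two vcpus intercept IDE pio on
different CPUs,

    /*   vcpu0                          vcpu1
     *   pointer = piobuf->pointer;
     *                                  pointer = piobuf->pointer;
     *   memcpy(bufp, &val, size);
     *                                  memcpy(bufp, &val, size);
     *   piobuf->pointer += size;       piobuf->pointer += size;
     */

Both reads observe the same pointer, both copies land in the same
buffer slot, and one request is lost while the pointer advances twice.
Holding buf_pioreq.lock across the read/validate/copy/advance sequence
makes the whole update atomic.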
 
@@ -258,7 +267,7 @@ static void legacy_io_access(VCPU *vcpu,
     vcpu_iodata_t *vio;
     ioreq_t *p;
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_vio(v);
     if (!vio)
         panic_domain(NULL, "bad shared page\n");
 
diff -r 74b40a9f4c0a -r 91575bb23d07 xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Wed Nov 07 10:31:09 2007 -0700
@@ -133,8 +133,34 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
             return -EPERM;
 
         if (op == HVMOP_set_param) {
-            d->arch.hvm_domain.params[a.index] = a.value;
-            rc = 0;
+            struct vmx_ioreq_page *iorp;
+            struct vcpu *v;
+
+            switch (a.index) {
+            case HVM_PARAM_IOREQ_PFN:
+                iorp = &d->arch.hvm_domain.ioreq;
+                rc = vmx_set_ioreq_page(d, iorp, a.value);
+                spin_lock(&iorp->lock);
+                if (rc == 0 && iorp->va != NULL)
+                    /* Initialise evtchn port info if VCPUs already created. */
+                    for_each_vcpu(d, v)
+                        get_vio(v)->vp_eport = v->arch.arch_vmx.xen_port;
+                spin_unlock(&iorp->lock);
+                break;
+            case HVM_PARAM_BUFIOREQ_PFN: 
+                iorp = &d->arch.hvm_domain.buf_ioreq;
+                rc = vmx_set_ioreq_page(d, iorp, a.value);
+                break;
+            case HVM_PARAM_BUFPIOREQ_PFN: 
+                iorp = &d->arch.hvm_domain.buf_pioreq;
+                rc = vmx_set_ioreq_page(d, iorp, a.value);
+                break;
+            default:
+                /* nothing */
+                break;
+            }
+            if (rc == 0)
+                d->arch.hvm_domain.params[a.index] = a.value;
         }
         else {
             a.value = d->arch.hvm_domain.params[a.index];
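
For context, these parameters are supplied by the toolstack when the
domain is built. A minimal caller-side sketch, assuming the libxc
wrapper xc_set_hvm_param() of this era and three hypothetical,
already-populated guest pfns (none of this is part of the patch):

    #include <xenctrl.h>
    #include <xen/hvm/params.h>

    /* Hypothetical helper: tell Xen which guest pfns hold the three
     * io pages.  io_pfn .. io_pfn + 2 are assumed to exist already. */
    static int set_io_pages(int xc_handle, domid_t domid,
                            unsigned long io_pfn)
    {
        int rc = xc_set_hvm_param(xc_handle, domid,
                                  HVM_PARAM_IOREQ_PFN, io_pfn);
        if (rc == 0)
            rc = xc_set_hvm_param(xc_handle, domid,
                                  HVM_PARAM_BUFIOREQ_PFN, io_pfn + 1);
        if (rc == 0)
            rc = xc_set_hvm_param(xc_handle, domid,
                                  HVM_PARAM_BUFPIOREQ_PFN, io_pfn + 2);
        return rc;
    }
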
diff -r 74b40a9f4c0a -r 91575bb23d07 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_init.c      Wed Nov 07 10:31:09 2007 -0700
@@ -267,22 +267,44 @@ vmx_load_state(struct vcpu *v)
         * anchored in vcpu */
 }
 
-static void vmx_create_event_channels(struct vcpu *v)
-{
-       vcpu_iodata_t *p;
+static int
+vmx_vcpu_initialise(struct vcpu *v)
+{
+       struct vmx_ioreq_page *iorp = &v->domain->arch.hvm_domain.ioreq;
+
+       int rc = alloc_unbound_xen_event_channel(v, 0);
+       if (rc < 0)
+               return rc;
+       v->arch.arch_vmx.xen_port = rc;
+
+       spin_lock(&iorp->lock);
+       if (v->domain->arch.vmx_platform.ioreq.va != 0) {
+               vcpu_iodata_t *p = get_vio(v);
+               p->vp_eport = v->arch.arch_vmx.xen_port;
+       }
+       spin_unlock(&iorp->lock);
+
+       gdprintk(XENLOG_INFO, "Allocated port %ld for hvm %d vcpu %d.\n",
+                v->arch.arch_vmx.xen_port, v->domain->domain_id, v->vcpu_id);
+
+       return 0;
+}
+
+static int vmx_create_event_channels(struct vcpu *v)
+{
        struct vcpu *o;
 
        if (v->vcpu_id == 0) {
                /* Ugly: create event channels for every vcpu when vcpu 0
                   starts, so that they're available for ioemu to bind to. */
                for_each_vcpu(v->domain, o) {
-                       p = get_vio(v->domain, o->vcpu_id);
-                       o->arch.arch_vmx.xen_port = p->vp_eport =
-                                       alloc_unbound_xen_event_channel(o, 0);
-                       gdprintk(XENLOG_INFO, "Allocated port %ld for hvm.\n",
-                                o->arch.arch_vmx.xen_port);
+                       int rc = vmx_vcpu_initialise(o);
+                       if (rc < 0) //XXX error recovery
+                               return rc;
                }
        }
+
+       return 0;
 }
 
 /*
@@ -292,6 +314,67 @@ static void vmx_release_assist_channel(s
 static void vmx_release_assist_channel(struct vcpu *v)
 {
        return;
+}
+
+/* The following three functions are based on hvm_xxx_ioreq_page()
+ * in xen/arch/x86/hvm/hvm.c */
+static void vmx_init_ioreq_page(
+       struct domain *d, struct vmx_ioreq_page *iorp)
+{
+       memset(iorp, 0, sizeof(*iorp));
+       spin_lock_init(&iorp->lock);
+       domain_pause(d);
+}
+
+static void vmx_destroy_ioreq_page(
+       struct domain *d, struct vmx_ioreq_page *iorp)
+{
+       spin_lock(&iorp->lock);
+
+       ASSERT(d->is_dying);
+
+       if (iorp->va != NULL) {
+               put_page(iorp->page);
+               iorp->page = NULL;
+               iorp->va = NULL;
+       }
+
+       spin_unlock(&iorp->lock);
+}
+
+int vmx_set_ioreq_page(
+       struct domain *d, struct vmx_ioreq_page *iorp, unsigned long gpfn)
+{
+       struct page_info *page;
+       unsigned long mfn;
+       pte_t pte;
+
+       pte = *lookup_noalloc_domain_pte(d, gpfn << PAGE_SHIFT);
+       if (!pte_present(pte) || !pte_mem(pte))
+               return -EINVAL;
+       mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
+       ASSERT(mfn_valid(mfn));
+
+       page = mfn_to_page(mfn);
+       if (get_page(page, d) == 0)
+               return -EINVAL;
+
+       spin_lock(&iorp->lock);
+
+       if ((iorp->va != NULL) || d->is_dying) {
+               spin_unlock(&iorp->lock);
+               put_page(page);
+               return -EINVAL;
+       }
+
+       iorp->va = mfn_to_virt(mfn);
+       iorp->page = page;
+
+       spin_unlock(&iorp->lock);
+
+       domain_unpause(d);
+
+       return 0;
 }
 
 /*
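
The get_page()/put_page() pairing above is the "reference counts of
the io pages must be incremented" changelog item: without the
reference, the guest could free or balloon out the page while Xen
still held a va pointing at it. The pairing, condensed from the three
functions (the pause/unpause bracketing follows from the code, not
from the changelog text):

    /* vmx_init_ioreq_page():    zero the descriptor, domain_pause()
     * vmx_set_ioreq_page():     get_page() + publish va/page, then
     *                           domain_unpause() on success
     * vmx_destroy_ioreq_page(): put_page() + clear va/page at
     *                           domain teardown
     *
     * Net effect: the domain stays paused until the toolstack has
     * supplied all three io pages via HVMOP_set_param. */
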
@@ -320,7 +403,10 @@ vmx_final_setup_guest(struct vcpu *v)
        rc = init_domain_tlb(v);
        if (rc)
                return rc;
-       vmx_create_event_channels(v);
+
+       rc = vmx_create_event_channels(v);
+       if (rc)
+               return rc;
 
        /* v->arch.schedule_tail = arch_vmx_do_launch; */
        vmx_create_vp(v);
@@ -352,6 +438,10 @@ vmx_relinquish_guest_resources(struct do
                vmx_release_assist_channel(v);
 
        vacpi_relinquish_resources(d);
+
+       vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.ioreq);
+       vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
+       vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
 }
 
 void
@@ -397,26 +487,14 @@ static void vmx_build_io_physmap_table(s
 
 int vmx_setup_platform(struct domain *d)
 {
-       unsigned long mpa;
        ASSERT(d != dom0); /* only for non-privileged vti domain */
 
        vmx_build_io_physmap_table(d);
 
-       mpa = __gpa_to_mpa(d, IO_PAGE_START);
-       if (mpa == 0)
-               return -EINVAL;
-       d->arch.vmx_platform.shared_page_va = (unsigned long)__va(mpa);
-       /* For buffered IO requests. */
-       spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
-
-       mpa = __gpa_to_mpa(d, BUFFER_IO_PAGE_START);
-       if (mpa == 0)
-               return -EINVAL;
-       d->arch.hvm_domain.buffered_io_va = (unsigned long)__va(mpa);
-       mpa = __gpa_to_mpa(d, BUFFER_PIO_PAGE_START);
-       if (mpa == 0)
-               return -EINVAL;
-       d->arch.hvm_domain.buffered_pio_va = (unsigned long)__va(mpa);
+       vmx_init_ioreq_page(d, &d->arch.vmx_platform.ioreq);
+       vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
+       vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
+
        /* TEMP */
        d->arch.vmx_platform.pib_base = 0xfee00000UL;
 
@@ -445,7 +523,7 @@ void vmx_do_resume(struct vcpu *v)
 
        /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
        /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-       p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+       p = &get_vio(v)->vp_ioreq;
        while (p->state != STATE_IOREQ_NONE) {
                switch (p->state) {
                case STATE_IORESP_READY: /* IORESP_READY -> NONE */
diff -r 74b40a9f4c0a -r 91575bb23d07 xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c   Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_support.c   Wed Nov 07 10:31:09 2007 -0700
@@ -42,7 +42,7 @@ void vmx_io_assist(struct vcpu *v)
      * This shared page contains I/O request between emulation code
      * and device model.
      */
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_vio(v);
     if (!vio)
         panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n",
                      (unsigned long)vio);
@@ -65,7 +65,7 @@ void vmx_send_assist_req(struct vcpu *v)
 {
     ioreq_t *p;
 
-    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    p = &get_vio(v)->vp_ioreq;
     if (unlikely(p->state != STATE_IOREQ_NONE)) {
         /* This indicates a bug in the device model.  Crash the
            domain. */
diff -r 74b40a9f4c0a -r 91575bb23d07 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h        Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/include/asm-ia64/vmx.h        Wed Nov 07 10:31:09 2007 -0700
@@ -57,8 +57,12 @@ extern void deliver_pal_init(struct vcpu
 extern void deliver_pal_init(struct vcpu *vcpu);
 extern void vmx_pend_pal_init(struct domain *d);
 
-static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
+static inline vcpu_iodata_t *get_vio(struct vcpu *v)
 {
-    return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
+    struct domain *d = v->domain;
+    shared_iopage_t *p = (shared_iopage_t *)d->arch.vmx_platform.ioreq.va;
+    ASSERT((v == current) || spin_is_locked(&d->arch.vmx_platform.ioreq.lock));
+    ASSERT(d->arch.vmx_platform.ioreq.va != NULL);
+    return &p->vcpu_iodata[v->vcpu_id];
 }
 #endif /* _ASM_IA64_VT_H */
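
A caller-side sketch of the new get_vio() contract (it mirrors the
HVM_PARAM_IOREQ_PFN handler above; the surrounding helper is
hypothetical): touching another vcpu's iodata requires the ioreq lock,
which is exactly what the spin_is_locked() ASSERT checks.

    /* Hypothetical caller: publish an event-channel port into a
     * remote vcpu's iodata.  Holding ioreq.lock (and checking va)
     * satisfies both ASSERTs in get_vio(). */
    static void publish_port(struct vcpu *v)
    {
        struct domain *d = v->domain;

        spin_lock(&d->arch.vmx_platform.ioreq.lock);
        if (d->arch.vmx_platform.ioreq.va != NULL)
            get_vio(v)->vp_eport = v->arch.arch_vmx.xen_port;
        spin_unlock(&d->arch.vmx_platform.ioreq.lock);
    }
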
diff -r 74b40a9f4c0a -r 91575bb23d07 xen/include/asm-ia64/vmx_platform.h
--- a/xen/include/asm-ia64/vmx_platform.h       Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/include/asm-ia64/vmx_platform.h       Wed Nov 07 10:31:09 2007 -0700
@@ -43,17 +43,24 @@
  * it is not used on ia64 */
 #define OS_TYPE_PORT    0xB2
 
+struct vmx_ioreq_page {
+    spinlock_t          lock;
+    struct page_info   *page;
+    void               *va;
+};
+int vmx_set_ioreq_page(struct domain *d,
+                       struct vmx_ioreq_page *iorp, unsigned long gmfn);
+
 typedef struct virtual_platform_def {
-    unsigned long       gos_type;
-    unsigned long       buffered_io_va;
-    spinlock_t          buffered_io_lock;
-    unsigned long       buffered_pio_va;
-    unsigned long       shared_page_va;
-    unsigned long       pib_base;
-    unsigned long       params[HVM_NR_PARAMS];
+    unsigned long               gos_type;
+    struct vmx_ioreq_page       ioreq;
+    struct vmx_ioreq_page       buf_ioreq;
+    struct vmx_ioreq_page       buf_pioreq;
+    unsigned long               pib_base;
+    unsigned long               params[HVM_NR_PARAMS];
     /* One IOSAPIC now... */
-    struct viosapic     viosapic;
-    struct vacpi        vacpi;
+    struct viosapic             viosapic;
+    struct vacpi                vacpi;
 } vir_plat_t;
 
 static inline int __fls(uint32_t word)
