
[Xen-changelog] [xen-unstable] [IA64] Expand hvm_op hypercall for PV-on-HVM/IPF



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID c232365128cf2e205ae3f551611d16dad273520e
# Parent  0ae5ba4585c9777c4237d2412125946d9702d060
[IA64] Expand hvm_op hypercall for PV-on-HVM/IPF

Introduce HVMOP_setup_shared_info_page
 - With this hypercall, a page allocated by the HVM guest OS replaces
   the original shared_info page.
 - On x86, the original shared_info page is reused after PV-on-HVM
   setup via the remapping feature of the arch-dependent
   HYPERVISOR_memory_op.  The same feature cannot be implemented for
   IPF, so this method was chosen instead.
Introduce HVMOP_setup_gnttab_table
 - With this hypercall, pages allocated by the HVM guest OS replace
   the original grant_table page frames.
 - Same rationale as above.
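
A guest-side caller of HVMOP_setup_shared_info_page might look roughly
like the sketch below.  It is an illustration only, not part of this
changeset: HYPERVISOR_hvm_op() stands for the guest's generic hvm_op
hypercall wrapper, and guest_alloc_page_gpa() is a hypothetical helper
that allocates one page and returns its guest-physical address.  (A
similar sketch for the grant-table op follows the arch-ia64.h hunk at
the end of the patch.)

/* Illustrative guest-side sketch only -- not part of this changeset.
 * HYPERVISOR_hvm_op() is assumed to be the guest's hvm_op hypercall
 * wrapper; guest_alloc_page_gpa() is a hypothetical helper that
 * allocates one page and returns its page-aligned guest-physical
 * address (0 on failure).
 */
#include <errno.h>

#define HVMOP_setup_shared_info_page    3    /* see arch-ia64.h below */

struct xen_hvm_setup {
    unsigned long arg1;
    unsigned long arg2;
};

extern long HYPERVISOR_hvm_op(unsigned long op, void *arg);   /* assumed */
extern unsigned long guest_alloc_page_gpa(void);              /* made up */

static int setup_shared_info(void)
{
    struct xen_hvm_setup a;

    a.arg1 = guest_alloc_page_gpa();   /* gpa of the replacement page */
    a.arg2 = 0;                        /* unused by this op           */
    if (a.arg1 == 0)
        return -ENOMEM;

    /* Xen copies the current shared_info contents into the new page,
     * switches d->shared_info over to it and re-points each vcpu_info
     * (see vmx_setup_shared_info_page below).
     */
    return (int)HYPERVISOR_hvm_op(HVMOP_setup_shared_info_page, &a);
}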

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@xxxxxxxxxxxxxx>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@xxxxxxxxxxxxxx>
---
 xen/arch/ia64/vmx/vmx_hypercall.c |  105 ++++++++++++++++++++++++++++++++++++++
 xen/include/public/arch-ia64.h    |   11 +++
 2 files changed, 116 insertions(+)

diff -r 0ae5ba4585c9 -r c232365128cf xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Thu Aug 24 11:54:23 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Fri Aug 25 15:06:18 2006 -0600
@@ -2,6 +2,7 @@
 /*
  * vmx_hyparcall.c: handling hypercall from domain
  * Copyright (c) 2005, Intel Corporation.
+ * Copyright (c) 2006, Fujitsu Limited.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -17,6 +18,8 @@
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
  *  Xuefei Xu (Anthony Xu) (Anthony.xu@xxxxxxxxx)
+ *  Tsunehisa Doi (Doi.Tsunehisa@xxxxxxxxxxxxxx)
+ *  Tomonari Horikoshi (t.horikoshi@xxxxxxxxxxxxxx)
  */
 
 #include <xen/config.h>
@@ -34,6 +37,89 @@
 #include <public/version.h>
 #include <asm/dom_fw.h>
 #include <xen/domain.h>
+#include <xen/compile.h>
+#include <xen/event.h>
+
+static void
+vmx_free_pages(unsigned long pgaddr, int npg)
+{
+    for (; npg > 0; npg--, pgaddr += PAGE_SIZE) {
+        /* If the original page belongs to the xen heap, relinquish it
+         * back to the xen heap.  Otherwise, leave it to the domain to decide.
+         */
+        if (likely(IS_XEN_HEAP_FRAME(virt_to_page(pgaddr)))) {
+            free_domheap_page(virt_to_page(pgaddr));
+            free_xenheap_page((void *)pgaddr);
+        }
+        else {
+            put_page(virt_to_page(pgaddr));
+        }
+    }
+}
+
+static int
+vmx_gnttab_setup_table(unsigned long frame_pa, unsigned long nr_frames)
+{
+    struct domain *d = current->domain;
+    unsigned long o_grant_shared, pgaddr;
+
+    if ((nr_frames != NR_GRANT_FRAMES) || (frame_pa & (PAGE_SIZE - 1))) {
+        return -EINVAL;
+    }
+
+    pgaddr = domain_mpa_to_imva(d, frame_pa);
+    if (pgaddr == 0) {
+        return -EFAULT;
+    }
+
+    o_grant_shared = (unsigned long)d->grant_table->shared;
+    d->grant_table->shared = (struct grant_entry *)pgaddr;
+
+    /* Copy existing grant table shared into new page */
+    if (o_grant_shared) {
+        memcpy((void *)d->grant_table->shared,
+               (void *)o_grant_shared, PAGE_SIZE * nr_frames);
+        vmx_free_pages(o_grant_shared, nr_frames);
+    }
+    else {
+        memset((void *)d->grant_table->shared, 0, PAGE_SIZE * nr_frames);
+    }
+    return 0;
+}
+
+static int
+vmx_setup_shared_info_page(unsigned long gpa)
+{
+    VCPU *vcpu = current;
+    struct domain *d = vcpu->domain;
+    unsigned long o_info, pgaddr;
+    struct vcpu *v;
+
+    if (gpa & (PAGE_SIZE - 1)) {
+        return -EINVAL;
+    }
+
+    pgaddr = domain_mpa_to_imva(d, gpa);
+    if (pgaddr == 0) {
+        return -EFAULT;
+    }
+
+    o_info = (u64)d->shared_info;
+    d->shared_info = (shared_info_t *)pgaddr;
+
+    /* Copy existing shared info into new page */
+    if (o_info) {
+        memcpy((void*)d->shared_info, (void*)o_info, PAGE_SIZE);
+        for_each_vcpu(d, v) {
+            v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
+        }
+        vmx_free_pages(o_info, 1);
+    }
+    else {
+        memset((void *)d->shared_info, 0, PAGE_SIZE);
+    }
+    return 0;
+}
 
 long
 do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
@@ -78,6 +164,25 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
         break;
     }
 
+    case HVMOP_setup_gnttab_table:
+    case HVMOP_setup_shared_info_page:
+    {
+        struct xen_hvm_setup a;
+
+        if (copy_from_guest(&a, arg, 1))
+            return -EFAULT;
+
+        switch (op) {
+        case HVMOP_setup_gnttab_table:
+            printk("vmx_gnttab_setup_table: frame_pa=%#lx,"
+                            "nr_frame=%ld\n", a.arg1, a.arg2);
+            return vmx_gnttab_setup_table(a.arg1, a.arg2);
+        case HVMOP_setup_shared_info_page:
+            printk("vmx_setup_shared_info_page: gpa=0x%lx\n", a.arg1);
+            return vmx_setup_shared_info_page(a.arg1);
+        }
+    }
+
     default:
         DPRINTK("Bad HVM op %ld.\n", op);
         rc = -ENOSYS;
diff -r 0ae5ba4585c9 -r c232365128cf xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Thu Aug 24 11:54:23 2006 -0600
+++ b/xen/include/public/arch-ia64.h    Fri Aug 25 15:06:18 2006 -0600
@@ -335,6 +335,17 @@ struct vcpu_guest_context {
 };
 typedef struct vcpu_guest_context vcpu_guest_context_t;
 DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+// hvm_op expansion
+#define HVMOP_setup_gnttab_table        2
+#define HVMOP_setup_shared_info_page    3
+
+struct xen_hvm_setup {
+    unsigned long arg1;
+    unsigned long arg2;
+};
+typedef struct xen_hvm_setup xen_hvm_setup_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_setup_t);
 
 // dom0 vp op
 #define __HYPERVISOR_ia64_dom0vp_op     __HYPERVISOR_arch_0
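
For the grant-table counterpart, arg1 carries the guest-physical
address of the first of arg2 contiguous page frames, and arg2 must
equal Xen's NR_GRANT_FRAMES.  A minimal guest-side sketch, with the
same assumed HYPERVISOR_hvm_op() wrapper and a hypothetical
contiguous-page allocator:

/* Illustrative guest-side sketch only -- not part of this changeset.
 * guest_alloc_pages_gpa(n) is a hypothetical helper that returns the
 * guest-physical address of n contiguous, page-aligned frames.
 * NR_GRANT_FRAMES must match the hypervisor's value, or the hypercall
 * returns -EINVAL.
 */
#define HVMOP_setup_gnttab_table    2
#define NR_GRANT_FRAMES             4    /* assumed; must match Xen */

struct xen_hvm_setup {
    unsigned long arg1;    /* gpa of the first grant-table frame */
    unsigned long arg2;    /* number of frames                   */
};

extern long HYPERVISOR_hvm_op(unsigned long op, void *arg);     /* assumed */
extern unsigned long guest_alloc_pages_gpa(unsigned long nr);   /* made up */

static int setup_gnttab(void)
{
    struct xen_hvm_setup a = {
        .arg1 = guest_alloc_pages_gpa(NR_GRANT_FRAMES),
        .arg2 = NR_GRANT_FRAMES,
    };

    /* Xen copies the existing grant_table->shared frames into the new
     * pages and frees/releases the old ones (see vmx_gnttab_setup_table).
     */
    return (int)HYPERVISOR_hvm_op(HVMOP_setup_gnttab_table, &a);
}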
