[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] [IA64] Add hypercalls needed for VTD



# HG changeset patch
# User Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
# Date 1224646806 -32400
# Node ID 6db3c096c244c941533c10d29b7262e1b3ce7cec
# Parent  46d7e12c4c919bab07af4b7097526dd06b824bea
[IA64] Add hypercalls needed for VTD

Add hypercalls needed for VTD.

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
---
 xen/arch/ia64/xen/dom0_ops.c  |  265 +++++++++++++++++++++++++++++++++++++++++-
 xen/arch/ia64/xen/hypercall.c |   78 ++++++++++--
 xen/arch/ia64/xen/mm.c        |   32 ++++-
 xen/include/asm-ia64/mm.h     |    2 
 4 files changed, 361 insertions(+), 16 deletions(-)

diff -r 46d7e12c4c91 -r 6db3c096c244 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Wed Oct 22 11:46:55 2008 +0900
+++ b/xen/arch/ia64/xen/dom0_ops.c      Wed Oct 22 12:40:06 2008 +0900
@@ -18,6 +18,7 @@
 #include <xen/trace.h>
 #include <xen/console.h>
 #include <xen/guest_access.h>
+#include <xen/pci.h>
 #include <asm/vmx.h>
 #include <asm/dom_fw.h>
 #include <asm/vhpt.h>
@@ -256,6 +257,266 @@ long arch_do_domctl(xen_domctl_t *op, XE
     }
     break;
 
+    case XEN_DOMCTL_get_device_group:
+    {
+        struct domain *d;
+        u32 max_sdevs;
+        u8 bus, devfn;
+        XEN_GUEST_HANDLE_64(uint32) sdevs;
+        int num_sdevs;
+
+        ret = -ENOSYS;
+        if ( !iommu_enabled )
+            break;
+
+        ret = -EINVAL;
+        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
+            break;
+
+        bus = (op->u.get_device_group.machine_bdf >> 16) & 0xff;
+        devfn = (op->u.get_device_group.machine_bdf >> 8) & 0xff;
+        max_sdevs = op->u.get_device_group.max_sdevs;
+        sdevs = op->u.get_device_group.sdev_array;
+
+        num_sdevs = iommu_get_device_group(d, bus, devfn, sdevs, max_sdevs);
+        if ( num_sdevs < 0 )
+        {
+            dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
+            ret = -EFAULT;
+            op->u.get_device_group.num_sdevs = 0;
+        }
+        else
+        {
+            ret = 0;
+            op->u.get_device_group.num_sdevs = num_sdevs;
+        }
+        if ( copy_to_guest(u_domctl, op, 1) )
+            ret = -EFAULT;
+        rcu_unlock_domain(d);
+    }
+    break;
+
+    case XEN_DOMCTL_test_assign_device:
+    {
+        u8 bus, devfn;
+
+        ret = -ENOSYS;
+        if ( !iommu_enabled )
+            break;
+
+        ret = -EINVAL;
+        bus = (op->u.assign_device.machine_bdf >> 16) & 0xff;
+        devfn = (op->u.assign_device.machine_bdf >> 8) & 0xff;
+
+        if ( device_assigned(bus, devfn) )
+        {
+            printk( "XEN_DOMCTL_test_assign_device: "
+                     "%x:%x:%x already assigned, or non-existent\n",
+                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+            break;
+        }
+        ret = 0;
+    }
+    break;
+
+    case XEN_DOMCTL_assign_device:
+    {
+        struct domain *d;
+        u8 bus, devfn;
+
+        ret = -ENOSYS;
+        if ( !iommu_enabled )
+            break;
+
+        ret = -EINVAL;
+        if ( unlikely((d = get_domain_by_id(op->domain)) == NULL) )
+        {
+            gdprintk(XENLOG_ERR,
+                "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
+            break;
+        }
+        bus = (op->u.assign_device.machine_bdf >> 16) & 0xff;
+        devfn = (op->u.assign_device.machine_bdf >> 8) & 0xff;
+
+        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
+        {
+            ret = -ENOSYS;
+            break;
+        }
+
+        if ( device_assigned(bus, devfn) )
+        {
+            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
+                     "%x:%x:%x already assigned, or non-existent\n",
+                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+            break;
+        }
+
+        ret = assign_device(d, bus, devfn);
+        gdprintk(XENLOG_INFO, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
+                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+        put_domain(d);
+    }
+    break;
+
+    case XEN_DOMCTL_deassign_device:
+    {
+        struct domain *d;
+        u8 bus, devfn;
+
+        ret = -ENOSYS;
+        if ( !iommu_enabled )
+            break;
+
+        ret = -EINVAL;
+        if ( unlikely((d = get_domain_by_id(op->domain)) == NULL) )
+        {
+            gdprintk(XENLOG_ERR,
+                "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
+            break;
+        }
+        bus = (op->u.assign_device.machine_bdf >> 16) & 0xff;
+        devfn = (op->u.assign_device.machine_bdf >> 8) & 0xff;
+
+        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
+        {
+            ret = -ENOSYS;
+            break;
+        }
+
+        if ( !device_assigned(bus, devfn) )
+            break;
+
+        ret = 0;
+        deassign_device(d, bus, devfn);
+        gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
+            bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+        put_domain(d);
+    }
+    break;
+
+    case XEN_DOMCTL_bind_pt_irq:
+    {
+        struct domain * d;
+        xen_domctl_bind_pt_irq_t * bind;
+
+        ret = -ESRCH;
+        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
+            break;
+        bind = &(op->u.bind_pt_irq);
+        if ( iommu_enabled )
+            ret = pt_irq_create_bind_vtd(d, bind);
+        if ( ret < 0 )
+            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
+        rcu_unlock_domain(d);
+    }
+    break;
+
+    case XEN_DOMCTL_unbind_pt_irq:
+    {
+        struct domain * d;
+        xen_domctl_bind_pt_irq_t * bind;
+
+        ret = -ESRCH;
+        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
+            break;
+        bind = &(op->u.bind_pt_irq);
+        if ( iommu_enabled )
+            ret = pt_irq_destroy_bind_vtd(d, bind);
+        if ( ret < 0 )
+            gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
+        rcu_unlock_domain(d);
+    }
+    break;
+
+    case XEN_DOMCTL_memory_mapping:
+    {
+        struct domain *d;
+        unsigned long gfn = op->u.memory_mapping.first_gfn;
+        unsigned long mfn = op->u.memory_mapping.first_mfn;
+        unsigned long nr_mfns = op->u.memory_mapping.nr_mfns;
+        int i;
+
+        ret = -EINVAL;
+        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
+            break;
+
+        ret = -ESRCH;
+        if ( unlikely((d = rcu_lock_domain_by_id(op->domain)) == NULL) )
+            break;
+
+        ret=0;
+        if ( op->u.memory_mapping.add_mapping )
+        {
+            gdprintk(XENLOG_INFO,
+                "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
+                gfn, mfn, nr_mfns);
+
+            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
+            for ( i = 0; i < nr_mfns; i++ )
+                assign_domain_mmio_page(d, (gfn+i)<<PAGE_SHIFT,
+                           (mfn+i)<<PAGE_SHIFT, PAGE_SIZE,
+                           ASSIGN_writable | ASSIGN_nocache);
+        }
+        else
+        {
+            gdprintk(XENLOG_INFO,
+                "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
+                 gfn, mfn, nr_mfns);
+
+            for ( i = 0; i < nr_mfns; i++ )
+                deassign_domain_mmio_page(d, (gfn+i)<<PAGE_SHIFT,
+                        (mfn+i)<<PAGE_SHIFT, PAGE_SIZE);
+            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+        }
+
+        rcu_unlock_domain(d);
+    }
+    break;
+
+    case XEN_DOMCTL_ioport_mapping:
+    {
+
+#define MAX_IOPORTS    0x10000
+        struct domain *d;
+        unsigned int fgp = op->u.ioport_mapping.first_gport;
+        unsigned int fmp = op->u.ioport_mapping.first_mport;
+        unsigned int np = op->u.ioport_mapping.nr_ports;
+
+        ret = -EINVAL;
+        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
+            ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
+        {
+            gdprintk(XENLOG_ERR,
+                "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
+                fgp, fmp, np);
+            break;
+        }
+
+        ret = -ESRCH;
+        if ( unlikely((d = rcu_lock_domain_by_id(op->domain)) == NULL) )
+            break;
+
+        if ( op->u.ioport_mapping.add_mapping )
+        {
+            gdprintk(XENLOG_INFO,
+                    "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
+                    fgp, fmp, np);
+
+            ret = ioports_permit_access(d, fgp, fmp, fmp + np - 1);
+        }
+        else
+        {
+            gdprintk(XENLOG_INFO,
+                    "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
+                    fgp, fmp, np);
+
+            ret = ioports_deny_access(d,  fgp, fgp + np - 1);
+        }
+        rcu_unlock_domain(d);
+    }
+    break;
+
     case XEN_DOMCTL_sethvmcontext:
     { 
         struct hvm_domain_context c;
@@ -387,10 +648,6 @@ long arch_do_domctl(xen_domctl_t *op, XE
         rcu_unlock_domain(d);
     }
     break;
-
-    case XEN_DOMCTL_assign_device:
-        ret = -ENOSYS;
-        break;
 
     default:
         printk("arch_do_domctl: unrecognized domctl: %d!!!\n",op->cmd);
diff -r 46d7e12c4c91 -r 6db3c096c244 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Wed Oct 22 11:46:55 2008 +0900
+++ b/xen/arch/ia64/xen/hypercall.c     Wed Oct 22 12:40:06 2008 +0900
@@ -35,6 +35,7 @@
 #include <public/arch-ia64/debug_op.h>
 #include <asm/sioemu.h>
 #include <public/arch-ia64/sioemu.h>
+#include <xen/pci.h>
 
 static IA64FAULT
 xen_hypercall (struct pt_regs *regs)
@@ -313,6 +314,21 @@ iosapic_guest_write(
 iosapic_guest_write(
     unsigned long physbase, unsigned int reg, u32 pval);
 
+
+/*
+ * XXX We don't support MSI for PCI passthrough, so just return ENOSYS
+ */
+static int physdev_map_pirq(struct physdev_map_pirq *map)
+{
+       return -ENOSYS;
+}
+
+static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
+{
+       return -ENOSYS;
+}
+
+
 long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
 {
     int irq;
@@ -426,18 +442,60 @@ long do_physdev_op(int cmd, XEN_GUEST_HA
         break;
     }
 
-    /*
-     * XXX We don't support MSI for PCI passthrough, so just return success
-     */
-    case PHYSDEVOP_map_pirq:
-    case PHYSDEVOP_unmap_pirq:
-        ret = 0;
-        break;
-
-    case PHYSDEVOP_manage_pci_add:
-    case PHYSDEVOP_manage_pci_remove:
+       case PHYSDEVOP_map_pirq: {
+        struct physdev_map_pirq map;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&map, arg, 1) != 0 )
+             break;
+
+        ret = physdev_map_pirq(&map);
+
+        if ( copy_to_guest(arg, &map, 1) != 0 )
+             ret = -EFAULT;
+        break;
+    }
+
+    case PHYSDEVOP_unmap_pirq: {
+        struct physdev_unmap_pirq unmap;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&unmap, arg, 1) != 0 )
+            break;
+
+        ret = physdev_unmap_pirq(&unmap);
+            break;
+    }
+
+    case PHYSDEVOP_manage_pci_add: {
+        struct physdev_manage_pci manage_pci;
+        ret = -EPERM;
+        if ( !IS_PRIV(current->domain) )
+            break;
+        ret = -EFAULT;
+        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
+            break;
+
+        ret = pci_add_device(manage_pci.bus, manage_pci.devfn);
+            break;
+    }
+
+    case PHYSDEVOP_manage_pci_remove: {
+        struct physdev_manage_pci manage_pci;
+        ret = -EPERM;
+        if ( !IS_PRIV(current->domain) )
+            break;
+        ret = -EFAULT;
+        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
+            break;
+
+        ret = pci_remove_device(manage_pci.bus, manage_pci.devfn);
+            break;
+    }
+
     default:
         ret = -ENOSYS;
+        printk("not implemented do_physdev_op: %d\n", cmd);
         break;
     }
 
diff -r 46d7e12c4c91 -r 6db3c096c244 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Wed Oct 22 11:46:55 2008 +0900
+++ b/xen/arch/ia64/xen/mm.c    Wed Oct 22 12:40:06 2008 +0900
@@ -1436,7 +1436,8 @@ zap_domain_page_one(struct domain *d, un
     again:
         // memory_exchange() calls guest_physmap_remove_page() with
         // a stealed page. i.e. page owner = NULL.
-        BUG_ON(page_get_owner(mfn_to_page(mfn)) != d &&
+        BUG_ON(mfn_valid(mfn) &&
+               page_get_owner(mfn_to_page(mfn)) != d &&
                page_get_owner(mfn_to_page(mfn)) != NULL);
         old_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
         old_pte = pfn_pte(mfn, __pgprot(old_arflags));
@@ -1459,12 +1460,39 @@ zap_domain_page_one(struct domain *d, un
         BUG_ON(mfn != pte_pfn(ret_pte));
     }
 
+    perfc_incr(zap_domain_page_one);
+    if(!mfn_valid(mfn))
+        return;
+
     page = mfn_to_page(mfn);
     BUG_ON((page->count_info & PGC_count_mask) == 0);
 
     BUG_ON(clear_PGC_allocate && (page_get_owner(page) == NULL));
     domain_put_page(d, mpaddr, pte, old_pte, clear_PGC_allocate);
-    perfc_incr(zap_domain_page_one);
+}
+
+int
+deassign_domain_mmio_page(struct domain *d, unsigned long mpaddr,
+                        unsigned long phys_addr, unsigned long size )
+{
+    unsigned long addr = mpaddr & PAGE_MASK;
+    unsigned long end = PAGE_ALIGN(mpaddr + size);
+
+    if (size == 0) {
+        gdprintk(XENLOG_INFO, "%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
+                __func__, d, mpaddr, size);
+    }
+    if (!efi_mmio(phys_addr, size)) {
+#ifndef NDEBUG
+        gdprintk(XENLOG_INFO, "%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
+                __func__, d, mpaddr, size);
+#endif
+        return -EINVAL;
+    }
+
+    for (; addr < end; addr += PAGE_SIZE )
+        zap_domain_page_one(d, addr, 0, INVALID_MFN);
+    return 0;
 }
 
 unsigned long
diff -r 46d7e12c4c91 -r 6db3c096c244 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Oct 22 11:46:55 2008 +0900
+++ b/xen/include/asm-ia64/mm.h Wed Oct 22 12:40:06 2008 +0900
@@ -428,6 +428,8 @@ extern int __assign_domain_page(struct d
 extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, 
unsigned long physaddr, unsigned long flags);
 extern void assign_domain_page(struct domain *d, unsigned long mpaddr, 
unsigned long physaddr);
 extern void assign_domain_io_page(struct domain *d, unsigned long mpaddr, 
unsigned long flags);
+extern int deassign_domain_mmio_page(struct domain *d, unsigned long mpaddr,
+                        unsigned long phys_addr, unsigned long size);
 struct p2m_entry;
 extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr, 
struct p2m_entry* entry);
 extern void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with Rackspace, which monitors our
servers 24x7x365 and backs them with Rackspace's Fanatical Support®.