[Xen-changelog] [xen-unstable] [IA64] iomem support for driver domains.



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 7be1cfe8345b6bf09e66f037de6fd59c3bb0f706
# Parent  48d7d00e69e5e1558fc06d6c4503f9ab23b6a933
[IA64] iomem support for driver domains.

First steps in the hypervisor to support driver domains.

I/O port capabilities added (not yet used).
I/O memory capabilities are now checked.
ASSIGN_nocache flag added.
Memory attributes are now checked.

Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
---
 xen/arch/ia64/xen/dom0_ops.c   |   44 ++++++++++++++-
 xen/arch/ia64/xen/domain.c     |    5 +
 xen/arch/ia64/xen/mm.c         |  119 ++++++++++++++++++++++++++---------------
 xen/include/asm-ia64/domain.h  |    4 +
 xen/include/asm-ia64/iocap.h   |    7 ++
 xen/include/public/arch-ia64.h |    3 +
 6 files changed, 138 insertions(+), 44 deletions(-)
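
A minimal usage sketch (not part of the patch): how a dom0 toolstack might
exercise the new DOM0_IOPORT_PERMISSION op to hand an I/O port range to a
driver domain.  The op and field names come from the dom0_ops.c hunk below
and assume the public dom0_ops.h definitions; issue_dom0_op() is a
hypothetical stand-in for the real hypercall wrapper.

    /* Sketch only: grant ports 0x3f8..0x3ff (COM1) to domain 'domid'. */
    static int grant_com1_ports(uint32_t domid)
    {
        dom0_op_t op = { .cmd = DOM0_IOPORT_PERMISSION,
                         .interface_version = DOM0_INTERFACE_VERSION };

        op.u.ioport_permission.domain       = domid;
        op.u.ioport_permission.first_port   = 0x3f8;
        op.u.ioport_permission.nr_ports     = 8;     /* 0x3f8..0x3ff */
        op.u.ioport_permission.allow_access = 1;     /* 0 would revoke */

        return issue_dom0_op(&op);   /* hypothetical hypercall wrapper */
    }

In the handler added below, a zero nr_ports succeeds as a no-op; otherwise
the rangeset is updated over [first_port, first_port + nr_ports - 1].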

diff -r 48d7d00e69e5 -r 7be1cfe8345b xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Thu Jul 27 09:17:54 2006 -0600
+++ b/xen/arch/ia64/xen/dom0_ops.c      Thu Jul 27 09:47:10 2006 -0600
@@ -20,6 +20,7 @@
 #include <public/sched_ctl.h>
 #include <asm/vmx.h>
 #include <asm/dom_fw.h>
+#include <xen/iocap.h>
 
 void build_physmap_table(struct domain *d);
 
@@ -279,6 +280,29 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
     }
     break;
 
+    case DOM0_IOPORT_PERMISSION:
+    {
+        struct domain *d;
+        unsigned int fp = op->u.ioport_permission.first_port;
+        unsigned int np = op->u.ioport_permission.nr_ports;
+
+        ret = -ESRCH;
+        d = find_domain_by_id(op->u.ioport_permission.domain);
+        if (unlikely(d == NULL))
+            break;
+
+        if (np == 0)
+            ret = 0;
+        else {
+            if (op->u.ioport_permission.allow_access)
+                ret = ioports_permit_access(d, fp, fp + np - 1);
+            else
+                ret = ioports_deny_access(d, fp, fp + np - 1);
+        }
+
+        put_domain(d);
+    }
+    break;
     default:
         printf("arch_do_dom0_op: unrecognized dom0 op: %d!!!\n",op->cmd);
         ret = -ENOSYS;
@@ -289,6 +313,24 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
 }
 
 #ifdef CONFIG_XEN_IA64_DOM0_VP
+static unsigned long
+dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
+{
+    unsigned long end;
+
+    /* Linux may use a 0 size!  */
+    if (size == 0)
+        size = PAGE_SIZE;
+
+    end = PAGE_ALIGN(mpaddr + size);
+
+    if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
+                                (end >> PAGE_SHIFT) - 1))
+        return -EPERM;
+
+    return assign_domain_mmio_page(d, mpaddr, size);
+}
+
 unsigned long
 do_dom0vp_op(unsigned long cmd,
              unsigned long arg0, unsigned long arg1, unsigned long arg2,
@@ -299,7 +341,7 @@ do_dom0vp_op(unsigned long cmd,
 
     switch (cmd) {
     case IA64_DOM0VP_ioremap:
-        ret = assign_domain_mmio_page(d, arg0, arg1);
+        ret = dom0vp_ioremap(d, arg0, arg1);
         break;
     case IA64_DOM0VP_phystomach:
         ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
diff -r 48d7d00e69e5 -r 7be1cfe8345b xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Thu Jul 27 09:17:54 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c        Thu Jul 27 09:47:10 2006 -0600
@@ -360,6 +360,9 @@ int arch_domain_create(struct domain *d)
        if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
            goto fail_nomem;
 
+       d->arch.ioport_caps = rangeset_new(d, "I/O Ports",
+                                          RANGESETF_prettyprint_hex);
+
        printf ("arch_domain_create: domain=%p\n", d);
        return 0;
 
@@ -885,6 +888,8 @@ static void physdev_init_dom0(struct dom
                BUG();
        if (irqs_permit_access(d, 0, NR_IRQS-1))
                BUG();
+       if (ioports_permit_access(d, 0, 0xffff))
+               BUG();
 }
 
 int construct_dom0(struct domain *d, 
diff -r 48d7d00e69e5 -r 7be1cfe8345b xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Thu Jul 27 09:17:54 2006 -0600
+++ b/xen/arch/ia64/xen/mm.c    Thu Jul 27 09:47:10 2006 -0600
@@ -418,13 +418,13 @@ u64 translate_domain_pte(u64 pteval, u64
        u64 mask, mpaddr, pteval2;
        u64 arflags;
        u64 arflags2;
+       u64 maflags2;
 
        pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
 
        // FIXME address had better be pre-validated on insert
        mask = ~itir_mask(itir.itir);
-       mpaddr = (((pteval & ~_PAGE_ED) & _PAGE_PPN_MASK) & ~mask) |
-                (address & mask);
+       mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
 #ifdef CONFIG_XEN_IA64_DOM0_VP
        if (itir.ps > PAGE_SHIFT) {
                itir.ps = PAGE_SHIFT;
@@ -454,6 +454,8 @@ u64 translate_domain_pte(u64 pteval, u64
        }
 #endif
        pteval2 = lookup_domain_mpa(d, mpaddr, entry);
+
+       /* Check access rights.  */
        arflags  = pteval  & _PAGE_AR_MASK;
        arflags2 = pteval2 & _PAGE_AR_MASK;
        if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
@@ -466,36 +468,53 @@ u64 translate_domain_pte(u64 pteval, u64
                        pteval2, arflags2, mpaddr);
 #endif
                pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
-    }
-
-       pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
-       pteval2 |= (pteval & _PAGE_ED);
-       pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
-       pteval2 |= (pteval & ~_PAGE_PPN_MASK);
-       /*
-        * Don't let non-dom0 domains map uncached addresses.  This can
-        * happen when domU tries to touch i/o port space.  Also prevents
-        * possible address aliasing issues.
-        * WB => WB
-        * UC, UCE, WC => WB
-        * NaTPage => NaTPage
-        */
-       if (d != dom0 && (pteval2 & _PAGE_MA_MASK) != _PAGE_MA_NAT)
-               pteval2 &= ~_PAGE_MA_MASK;
-
-    /* If shadow mode is enabled, virtualize dirty bit.  */
-    if (shadow_mode_enabled(d) && (pteval2 & _PAGE_D)) {
-        u64 mp_page = mpaddr >> PAGE_SHIFT;
-        pteval2 |= _PAGE_VIRT_D;
-
-        /* If the page is not already dirty, don't set the dirty bit.
-           This is a small optimization!  */
-        if (mp_page < d->arch.shadow_bitmap_size * 8
-            && !test_bit(mp_page, d->arch.shadow_bitmap))
-            pteval2 = (pteval2 & ~_PAGE_D);
-    }
-
-       return pteval2;
+       }
+
+       /* Check memory attribute. The switch is on the *requested* memory
+          attribute.  */
+       maflags2 = pteval2 & _PAGE_MA_MASK;
+       switch (pteval & _PAGE_MA_MASK) {
+       case _PAGE_MA_NAT:
+               /* NaT pages are always accepted!  */                
+               break;
+       case _PAGE_MA_UC:
+       case _PAGE_MA_UCE:
+       case _PAGE_MA_WC:
+               if (maflags2 == _PAGE_MA_WB) {
+                       /* Don't let domains WB-map uncached addresses.
+                          This can happen when domU tries to touch i/o
+                          port space.  Also prevents possible address
+                          aliasing issues.  */
+                       printf("Warning: UC to WB for mpaddr=%lx\n", mpaddr);
+                       pteval = (pteval & ~_PAGE_MA_MASK) | _PAGE_MA_WB;
+               }
+               break;
+       case _PAGE_MA_WB:
+               if (maflags2 != _PAGE_MA_WB) {
+                       /* Forbid non-coherent access to coherent memory. */
+                       panic_domain(NULL, "try to use WB mem attr on "
+                                    "UC page, mpaddr=%lx\n", mpaddr);
+               }
+               break;
+       default:
+               panic_domain(NULL, "try to use unknown mem attribute\n");
+       }
+
+       /* If shadow mode is enabled, virtualize dirty bit.  */
+       if (shadow_mode_enabled(d) && (pteval & _PAGE_D)) {
+               u64 mp_page = mpaddr >> PAGE_SHIFT;
+               pteval |= _PAGE_VIRT_D;
+
+               /* If the page is not already dirty, don't set the dirty bit! */
+               if (mp_page < d->arch.shadow_bitmap_size * 8
+                   && !test_bit(mp_page, d->arch.shadow_bitmap))
+                       pteval &= ~_PAGE_D;
+       }
+    
+       /* Ignore non-addr bits of pteval2 and force PL0->2
+          (PL3 is unaffected) */
+       return (pteval & ~_PAGE_PPN_MASK) |
+              (pteval2 & _PAGE_PPN_MASK) | _PAGE_PL_2;
 }
 
 // given a current domain metaphysical address, return the physical address
@@ -823,8 +842,19 @@ assign_new_domain0_page(struct domain *d
 #endif
 }
 
+static unsigned long
+flags_to_prot (unsigned long flags)
+{
+    unsigned long res = _PAGE_PL_2 | __DIRTY_BITS;
+
+    res |= flags & ASSIGN_readonly ? _PAGE_AR_R: _PAGE_AR_RWX;
+    res |= flags & ASSIGN_nocache ? _PAGE_MA_UC: _PAGE_MA_WB;
+    
+    return res;
+}
+
 /* map a physical address to the specified metaphysical addr */
-// flags: currently only ASSIGN_readonly
+// flags: currently only ASSIGN_readonly, ASSIGN_nocache
 // This is called by assign_domain_mmio_page().
 // So accessing to pte is racy.
 void
@@ -836,13 +866,12 @@ __assign_domain_page(struct domain *d,
     pte_t old_pte;
     pte_t new_pte;
     pte_t ret_pte;
-    unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
+    unsigned long prot = flags_to_prot(flags);
 
     pte = lookup_alloc_domain_pte(d, mpaddr);
 
     old_pte = __pte(0);
-    new_pte = pfn_pte(physaddr >> PAGE_SHIFT,
-                      __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags));
+    new_pte = pfn_pte(physaddr >> PAGE_SHIFT, __pgprot(prot));
     ret_pte = ptep_cmpxchg_rel(&d->arch.mm, mpaddr, pte, old_pte, new_pte);
     if (pte_val(ret_pte) == pte_val(old_pte))
         smp_mb();
@@ -941,7 +970,7 @@ assign_domain_mmio_page(struct domain *d
                 __func__, __LINE__, d, mpaddr, size);
         return -EINVAL;
     }
-    assign_domain_same_page(d, mpaddr, size, ASSIGN_writable);
+    assign_domain_same_page(d, mpaddr, size, ASSIGN_writable | ASSIGN_nocache);
     return mpaddr;
 }
 
@@ -967,11 +996,12 @@ assign_domain_page_replace(struct domain
     volatile pte_t* pte;
     pte_t old_pte;
     pte_t npte;
-    unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
+    unsigned long prot = flags_to_prot(flags);
+
     pte = lookup_alloc_domain_pte(d, mpaddr);
 
     // update pte
-    npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags));
+    npte = pfn_pte(mfn, __pgprot(prot));
     old_pte = ptep_xchg(mm, mpaddr, pte, npte);
     if (pte_mem(old_pte)) {
         unsigned long old_mfn = pte_pfn(old_pte);
@@ -1013,7 +1043,7 @@ assign_domain_page_cmpxchg_rel(struct do
     unsigned long old_arflags;
     pte_t old_pte;
     unsigned long new_mfn;
-    unsigned long new_arflags;
+    unsigned long new_prot;
     pte_t new_pte;
     pte_t ret_pte;
 
@@ -1029,10 +1059,9 @@ assign_domain_page_cmpxchg_rel(struct do
         return -EINVAL;
     }
 
-    new_arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
+    new_prot = flags_to_prot(flags);
     new_mfn = page_to_mfn(new_page);
-    new_pte = pfn_pte(new_mfn,
-                      __pgprot(__DIRTY_BITS | _PAGE_PL_2 | new_arflags));
+    new_pte = pfn_pte(new_mfn, __pgprot(new_prot));
 
     // update pte
     ret_pte = ptep_cmpxchg_rel(mm, mpaddr, pte, old_pte, new_pte);
@@ -1151,6 +1180,10 @@ dom0vp_add_physmap(struct domain* d, uns
 {
     int error = 0;
     struct domain* rd;
+
+    /* Not allowed by a domain.  */
+    if (flags & ASSIGN_nocache)
+        return -EINVAL;
 
     rd = find_domain_by_id(domid);
     if (unlikely(rd == NULL)) {
diff -r 48d7d00e69e5 -r 7be1cfe8345b xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Thu Jul 27 09:17:54 2006 -0600
+++ b/xen/include/asm-ia64/domain.h     Thu Jul 27 09:47:10 2006 -0600
@@ -11,6 +11,7 @@
 #include <xen/list.h>
 #include <xen/cpumask.h>
 #include <asm/fpswa.h>
+#include <xen/rangeset.h>
 
 struct p2m_entry {
     volatile pte_t*     pte;
@@ -86,6 +87,9 @@ struct arch_domain {
             unsigned int is_vti : 1;
         };
     };
+
+    /* Allowed accesses to io ports.  */
+    struct rangeset *ioport_caps;
 
     /* There are two ranges of RID for a domain:
        one big range, used to virtualize domain RID,
diff -r 48d7d00e69e5 -r 7be1cfe8345b xen/include/asm-ia64/iocap.h
--- a/xen/include/asm-ia64/iocap.h      Thu Jul 27 09:17:54 2006 -0600
+++ b/xen/include/asm-ia64/iocap.h      Thu Jul 27 09:47:10 2006 -0600
@@ -7,4 +7,11 @@
 #ifndef __IA64_IOCAP_H__
 #define __IA64_IOCAP_H__
 
+#define ioports_permit_access(d, s, e)                  \
+    rangeset_add_range((d)->arch.ioport_caps, s, e)
+#define ioports_deny_access(d, s, e)                    \
+    rangeset_remove_range((d)->arch.ioport_caps, s, e)
+#define ioports_access_permitted(d, s, e)               \
+    rangeset_contains_range((d)->arch.ioport_caps, s, e)
+
 #endif /* __IA64_IOCAP_H__ */
diff -r 48d7d00e69e5 -r 7be1cfe8345b xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Thu Jul 27 09:17:54 2006 -0600
+++ b/xen/include/public/arch-ia64.h    Thu Jul 27 09:47:10 2006 -0600
@@ -359,6 +359,9 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_conte
 #define _ASSIGN_readonly                0
 #define ASSIGN_readonly                 (1UL << _ASSIGN_readonly)
 #define ASSIGN_writable                 (0UL << _ASSIGN_readonly) // dummy flag
+/* Internal only: memory attribute must be WC/UC/UCE.  */
+#define _ASSIGN_nocache                 1
+#define ASSIGN_nocache                  (1UL << _ASSIGN_nocache)
 
 /* This structure has the same layout of struct ia64_boot_param, defined in
    <asm/system.h>.  It is redefined here to ease use.  */
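
A worked note on the new iomem check in dom0vp_ioremap(), as a sketch
rather than part of the patch.  A zero size is treated as one page
(matching Linux callers), and the inclusive frame range passed to
iomem_access_permitted() is [mpaddr >> PAGE_SHIFT,
(PAGE_ALIGN(mpaddr + size) >> PAGE_SHIFT) - 1].  Assuming PAGE_SHIFT == 14
(16KB pages, a common ia64 configuration):

    /* Sketch only: the frame range checked for mpaddr=0x80000000, size=0x100.
     *   end         = PAGE_ALIGN(0x80000000 + 0x100)  = 0x80004000
     *   first frame = 0x80000000 >> 14                = 0x20000
     *   last frame  = (0x80004000 >> 14) - 1          = 0x20000
     * The mapping fails with -EPERM unless dom0 has previously granted the
     * frame to the driver domain, e.g. with the generic rangeset helper
     * iomem_permit_access(d, 0x20000, 0x20000) from xen/iocap.h.
     */

On the ASSIGN_nocache side, the new flags_to_prot() helper folds the flag
into the pte memory attribute: ASSIGN_nocache selects _PAGE_MA_UC, otherwise
_PAGE_MA_WB, on top of __DIRTY_BITS | _PAGE_PL_2 and the AR bits chosen by
ASSIGN_readonly.  assign_domain_mmio_page() now passes
ASSIGN_writable | ASSIGN_nocache, so MMIO ranges are mapped uncacheable,
while dom0vp_add_physmap() rejects ASSIGN_nocache from guests with -EINVAL.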
