
[Xen-changelog] [xen-unstable] x86: replace nr_irqs sized per-domain arrays with radix trees



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxxxx>
# Date 1304252190 -3600
# Node ID 4891f1f41ba54cfe537e5194b28d0abc4d930733
# Parent  c0a8f889ca9e46574215bad8c0e093c5f947852c
x86: replace nr_irqs sized per-domain arrays with radix trees

It would seem possible to fold the two trees into one (e.g. by storing the
emuirq bits in the upper half of the pointer), but I'm not certain that's
worth it, as it would make deletion of entries more cumbersome, unless
pirqs and emuirqs were mutually exclusive...

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
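
For illustration, a minimal standalone sketch of the folding idea mentioned
in the description, assuming 64-bit pointers and 32-bit pirq/emuirq values;
the helper names are hypothetical and not part of Xen. It also shows why
clearing just one half of such a combined entry would need a read-modify-write
of the slot rather than a plain delete.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical packing of a combined tree entry: pirq in the low 32 bits,
 * emuirq in the high 32 bits (assumes 64-bit void *).
 */
static void *pack_entry(uint32_t pirq, uint32_t emuirq)
{
    return (void *)(uintptr_t)(((uint64_t)emuirq << 32) | pirq);
}

static uint32_t entry_pirq(void *e)
{
    return (uint32_t)(uintptr_t)e;
}

static uint32_t entry_emuirq(void *e)
{
    return (uint32_t)(((uint64_t)(uintptr_t)e) >> 32);
}

int main(void)
{
    void *e = pack_entry(17, 5);

    printf("pirq=%u emuirq=%u\n", entry_pirq(e), entry_emuirq(e));

    /*
     * Dropping only the emuirq half cannot be a simple delete of the tree
     * entry; the slot has to be re-written with the pirq half preserved,
     * which is the extra bookkeeping the description alludes to.
     */
    e = pack_entry(entry_pirq(e), 0);
    printf("pirq=%u emuirq=%u\n", entry_pirq(e), entry_emuirq(e));

    return 0;
}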


diff -r c0a8f889ca9e -r 4891f1f41ba5 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Sun May 01 13:03:37 2011 +0100
+++ b/xen/arch/x86/domain.c     Sun May 01 13:16:30 2011 +0100
@@ -614,26 +614,16 @@
         memset(d->arch.pirq_irq, 0,
                d->nr_pirqs * sizeof(*d->arch.pirq_irq));
 
-        d->arch.irq_pirq = xmalloc_array(int, nr_irqs);
-        if ( !d->arch.irq_pirq )
+        if ( (rc = init_domain_irq_mapping(d)) != 0 )
             goto fail;
-        memset(d->arch.irq_pirq, 0,
-               nr_irqs * sizeof(*d->arch.irq_pirq));
-
-        for ( i = 1; platform_legacy_irq(i); ++i )
-            if ( !IO_APIC_IRQ(i) )
-                d->arch.irq_pirq[i] = d->arch.pirq_irq[i] = i;
 
         if ( is_hvm_domain(d) )
         {
             d->arch.pirq_emuirq = xmalloc_array(int, d->nr_pirqs);
-            d->arch.emuirq_pirq = xmalloc_array(int, nr_irqs);
-            if ( !d->arch.pirq_emuirq || !d->arch.emuirq_pirq )
+            if ( !d->arch.pirq_emuirq )
                 goto fail;
             for (i = 0; i < d->nr_pirqs; i++)
                 d->arch.pirq_emuirq[i] = IRQ_UNBOUND;
-            for (i = 0; i < nr_irqs; i++)
-                d->arch.emuirq_pirq[i] = IRQ_UNBOUND;
         }
 
 
@@ -671,9 +661,8 @@
     d->is_dying = DOMDYING_dead;
     vmce_destroy_msr(d);
     xfree(d->arch.pirq_irq);
-    xfree(d->arch.irq_pirq);
     xfree(d->arch.pirq_emuirq);
-    xfree(d->arch.emuirq_pirq);
+    cleanup_domain_irq_mapping(d);
     free_xenheap_page(d->shared_info);
     if ( paging_initialised )
         paging_final_teardown(d);
@@ -726,9 +715,8 @@
 
     free_xenheap_page(d->shared_info);
     xfree(d->arch.pirq_irq);
-    xfree(d->arch.irq_pirq);
     xfree(d->arch.pirq_emuirq);
-    xfree(d->arch.emuirq_pirq);
+    cleanup_domain_irq_mapping(d);
 }
 
 unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
diff -r c0a8f889ca9e -r 4891f1f41ba5 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Sun May 01 13:03:37 2011 +0100
+++ b/xen/arch/x86/irq.c        Sun May 01 13:16:30 2011 +0100
@@ -950,6 +950,58 @@
     return desc;
 }
 
+static int set_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+    int err = radix_tree_insert(&d->arch.irq_pirq, irq, (void *)(long)pirq,
+                                NULL, NULL);
+
+    switch ( err )
+    {
+    case -EEXIST:
+        *radix_tree_lookup_slot(&d->arch.irq_pirq, irq) = (void *)(long)pirq;
+        /* fall through */
+    case 0:
+        d->arch.pirq_irq[pirq] = irq;
+        return 0;
+    }
+
+    return err;
+}
+
+static void clear_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+    d->arch.pirq_irq[pirq] = 0;
+    radix_tree_delete(&d->arch.irq_pirq, irq, NULL);
+}
+
+int init_domain_irq_mapping(struct domain *d)
+{
+    unsigned int i;
+    int err;
+
+    INIT_RADIX_TREE(&d->arch.irq_pirq, 0);
+    if ( is_hvm_domain(d) )
+        INIT_RADIX_TREE(&d->arch.hvm_domain.emuirq_pirq, 0);
+
+    for ( i = 1, err = 0; !err && platform_legacy_irq(i); ++i )
+        if ( !IO_APIC_IRQ(i) )
+            err = set_domain_irq_pirq(d, i, i);
+
+    return err;
+}
+
+static void irq_slot_free(void *unused)
+{
+}
+
+void cleanup_domain_irq_mapping(struct domain *d)
+{
+    radix_tree_destroy(&d->arch.irq_pirq, irq_slot_free, NULL);
+    if ( is_hvm_domain(d) )
+        radix_tree_destroy(&d->arch.hvm_domain.emuirq_pirq,
+                           irq_slot_free, NULL);
+}
+
 /* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
 static void flush_ready_eoi(void)
 {
@@ -1386,7 +1438,7 @@
         BUG_ON(irq <= 0);
         desc = irq_to_desc(irq);
         spin_lock_irq(&desc->lock);
-        d->arch.pirq_irq[pirq] = d->arch.irq_pirq[irq] = 0;
+        clear_domain_irq_pirq(d, irq, pirq);
     }
     else
     {
@@ -1544,15 +1596,23 @@
             dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
               d->domain_id, irq);
         desc->handler = &pci_msi_type;
-        d->arch.pirq_irq[pirq] = irq;
-        d->arch.irq_pirq[irq] = pirq;
-        setup_msi_irq(pdev, msi_desc, irq);
-        spin_unlock_irqrestore(&desc->lock, flags);
-    } else
+        ret = set_domain_irq_pirq(d, irq, pirq);
+        if ( !ret )
+        {
+            setup_msi_irq(pdev, msi_desc, irq);
+            spin_unlock_irqrestore(&desc->lock, flags);
+        }
+        else
+        {
+            desc->handler = &no_irq_type;
+            spin_unlock_irqrestore(&desc->lock, flags);
+            pci_disable_msi(msi_desc);
+        }
+    }
+    else
     {
         spin_lock_irqsave(&desc->lock, flags);
-        d->arch.pirq_irq[pirq] = irq;
-        d->arch.irq_pirq[irq] = pirq;
+        ret = set_domain_irq_pirq(d, irq, pirq);
         spin_unlock_irqrestore(&desc->lock, flags);
     }
 
@@ -1599,14 +1659,11 @@
     BUG_ON(irq != domain_pirq_to_irq(d, pirq));
 
     if ( !forced_unbind )
-    {
-        d->arch.pirq_irq[pirq] = 0;
-        d->arch.irq_pirq[irq] = 0;
-    }
+        clear_domain_irq_pirq(d, irq, pirq);
     else
     {
         d->arch.pirq_irq[pirq] = -irq;
-        d->arch.irq_pirq[irq] = -pirq;
+        *radix_tree_lookup_slot(&d->arch.irq_pirq, irq) = (void *)(long)-pirq;
     }
 
     spin_unlock_irqrestore(&desc->lock, flags);
@@ -1829,10 +1886,25 @@
         return 0;
     }
 
-    d->arch.pirq_emuirq[pirq] = emuirq;
     /* do not store emuirq mappings for pt devices */
     if ( emuirq != IRQ_PT )
-        d->arch.emuirq_pirq[emuirq] = pirq;
+    {
+        int err = radix_tree_insert(&d->arch.hvm_domain.emuirq_pirq, emuirq,
+                                    (void *)((long)pirq + 1), NULL, NULL);
+
+        switch ( err )
+        {
+        case 0:
+            break;
+        case -EEXIST:
+            *radix_tree_lookup_slot(&d->arch.hvm_domain.emuirq_pirq, emuirq) =
+                (void *)((long)pirq + 1);
+            break;
+        default:
+            return err;
+        }
+    }
+    d->arch.pirq_emuirq[pirq] = emuirq;
 
     return 0;
 }
@@ -1860,7 +1932,7 @@
 
     d->arch.pirq_emuirq[pirq] = IRQ_UNBOUND;
     if ( emuirq != IRQ_PT )
-        d->arch.emuirq_pirq[emuirq] = IRQ_UNBOUND;
+        radix_tree_delete(&d->arch.hvm_domain.emuirq_pirq, emuirq, NULL);
 
  done:
     return ret;
diff -r c0a8f889ca9e -r 4891f1f41ba5 xen/common/radix-tree.c
--- a/xen/common/radix-tree.c   Sun May 01 13:03:37 2011 +0100
+++ b/xen/common/radix-tree.c   Sun May 01 13:16:30 2011 +0100
@@ -26,7 +26,6 @@
  * o tagging code removed
  * o radix_tree_insert has func parameter for dynamic data struct allocation
  * o radix_tree_destroy added (including recursive helper function)
- * o __init functions must be called explicitly
  * o other include files adapted to Xen
  */
 
@@ -35,6 +34,7 @@
 #include <xen/lib.h>
 #include <xen/types.h>
 #include <xen/errno.h>
+#include <xen/xmalloc.h>
 #include <xen/radix-tree.h>
 #include <asm/cache.h>
 
@@ -49,6 +49,18 @@
     return height_to_maxindex[height];
 }
 
+static struct radix_tree_node *_node_alloc(void *unused)
+{
+    struct radix_tree_node *node = xmalloc(struct radix_tree_node);
+
+    return node ? memset(node, 0, sizeof(*node)) : node;
+}
+
+static void _node_free(struct radix_tree_node *node)
+{
+    xfree(node);
+}
+
 /*
  * Extend a radix tree so it can store key @index.
  */
@@ -100,6 +112,9 @@
     int offset;
     int error;
 
+    if (!node_alloc)
+        node_alloc = _node_alloc;
+
     /* Make sure the tree is high enough.  */
     if (index > radix_tree_maxindex(root->height)) {
         error = radix_tree_extend(root, index, node_alloc, arg);
@@ -336,6 +351,9 @@
     unsigned int height, shift;
     int offset;
 
+    if (!node_free)
+        node_free = _node_free;
+
     height = root->height;
     if (index > radix_tree_maxindex(height))
         goto out;
@@ -420,6 +438,8 @@
     if (root->height == 0)
         slot_free(root->rnode);
     else {
+        if (!node_free)
+            node_free = _node_free;
         radix_tree_node_destroy(root->rnode, root->height,
                                 slot_free, node_free);
         node_free(root->rnode);
@@ -440,10 +460,14 @@
     return index;
 }
 
-void __init radix_tree_init(void)
+static int __init radix_tree_init(void)
 {
     unsigned int i;
 
     for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
         height_to_maxindex[i] = __maxindex(i);
+
+    return 0;
 }
+/* pre-SMP just so it runs before 'normal' initcalls */
+presmp_initcall(radix_tree_init);
diff -r c0a8f889ca9e -r 4891f1f41ba5 xen/common/tmem.c
--- a/xen/common/tmem.c Sun May 01 13:03:37 2011 +0100
+++ b/xen/common/tmem.c Sun May 01 13:16:30 2011 +0100
@@ -2925,7 +2925,6 @@
     if ( !tmh_enabled() )
         return 0;
 
-    radix_tree_init();
     if ( tmh_dedup_enabled() )
         for (i = 0; i < 256; i++ )
         {
diff -r c0a8f889ca9e -r 4891f1f41ba5 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Sun May 01 13:03:37 2011 +0100
+++ b/xen/include/asm-x86/domain.h      Sun May 01 13:16:30 2011 +0100
@@ -3,6 +3,7 @@
 
 #include <xen/config.h>
 #include <xen/mm.h>
+#include <xen/radix-tree.h>
 #include <asm/hvm/vcpu.h>
 #include <asm/hvm/domain.h>
 #include <asm/e820.h>
@@ -284,10 +285,9 @@
     const char *nested_p2m_function;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
-    int *irq_pirq;
+    struct radix_tree_root irq_pirq;
     int *pirq_irq;
-    /* pirq to emulated irq and vice versa */
-    int *emuirq_pirq;
+    /* pirq to emulated irq */
     int *pirq_emuirq;
 
     /* Maximum physical-address bitwidth supported by this guest. */
diff -r c0a8f889ca9e -r 4891f1f41ba5 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Sun May 01 13:03:37 2011 +0100
+++ b/xen/include/asm-x86/hvm/domain.h  Sun May 01 13:16:30 2011 +0100
@@ -59,6 +59,9 @@
     /* VCPU which is current target for 8259 interrupts. */
     struct vcpu           *i8259_target;
 
+    /* emulated irq to pirq */
+    struct radix_tree_root emuirq_pirq;
+
     /* hvm_print_line() logging. */
 #define HVM_PBUF_SIZE 80
     char                  *pbuf;
diff -r c0a8f889ca9e -r 4891f1f41ba5 xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Sun May 01 13:03:37 2011 +0100
+++ b/xen/include/asm-x86/irq.h Sun May 01 13:16:30 2011 +0100
@@ -143,11 +143,17 @@
 
 void irq_set_affinity(struct irq_desc *, const cpumask_t *mask);
 
+int init_domain_irq_mapping(struct domain *);
+void cleanup_domain_irq_mapping(struct domain *);
+
 #define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
-#define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
+#define domain_irq_to_pirq(d, irq) \
+    ((long)radix_tree_lookup(&(d)->arch.irq_pirq, irq))
 #define PIRQ_ALLOCATED -1
 #define domain_pirq_to_emuirq(d, pirq) ((d)->arch.pirq_emuirq[pirq])
-#define domain_emuirq_to_pirq(d, emuirq) ((d)->arch.emuirq_pirq[emuirq])
+#define domain_emuirq_to_pirq(d, emuirq) \
+    (((long)radix_tree_lookup(&(d)->arch.hvm_domain.emuirq_pirq, emuirq) ?: \
+     IRQ_UNBOUND + 1) - 1)
 #define IRQ_UNBOUND -1
 #define IRQ_PT -2
 
diff -r c0a8f889ca9e -r 4891f1f41ba5 xen/include/xen/radix-tree.h
--- a/xen/include/xen/radix-tree.h      Sun May 01 13:03:37 2011 +0100
+++ b/xen/include/xen/radix-tree.h      Sun May 01 13:16:30 2011 +0100
@@ -73,6 +73,5 @@
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                        unsigned long first_index, unsigned int max_items);
-void radix_tree_init(void);
 
 #endif /* _XEN_RADIX_TREE_H */
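
A note on the emuirq_pirq hunks above: entries are inserted as
(void *)((long)pirq + 1), i.e. biased by one, so that a failed
radix_tree_lookup() (which returns NULL) decodes to IRQ_UNBOUND while a
genuine pirq of 0 remains representable. Below is a minimal standalone
sketch of that decode, using a stand-in for the lookup; the ?: form is the
same GNU extension the domain_emuirq_to_pirq() macro uses.

#include <stdio.h>

#define IRQ_UNBOUND -1

/* Stand-in for radix_tree_lookup(): 0 means "no entry", else pirq + 1. */
static long lookup_stub(long stored)
{
    return stored;
}

static long emuirq_to_pirq(long stored)
{
    /* Mirrors domain_emuirq_to_pirq(): a NULL lookup yields IRQ_UNBOUND. */
    return (lookup_stub(stored) ?: IRQ_UNBOUND + 1) - 1;
}

int main(void)
{
    printf("%ld\n", emuirq_to_pirq(0));     /* no entry -> -1 (IRQ_UNBOUND) */
    printf("%ld\n", emuirq_to_pirq(0 + 1)); /* pirq 0   ->  0 */
    printf("%ld\n", emuirq_to_pirq(7 + 1)); /* pirq 7   ->  7 */
    return 0;
}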

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog