[Xen-devel] [PATCH v2 01/15] xen/arm: register mmio handler at runtime



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>

Register mmio handlers at runtime through a new register_mmio_handler()
interface and keep them in a per-domain list, replacing the static,
compile-time mmio_handlers[] array.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
 xen/arch/arm/domain.c        |    4 ++++
 xen/arch/arm/io.c            |   34 +++++++++++++++++++++++-----------
 xen/arch/arm/io.h            |    8 ++++++--
 xen/arch/arm/vgic.c          |    4 +++-
 xen/arch/arm/vuart.c         |    6 +++++-
 xen/include/asm-arm/domain.h |    1 +
 6 files changed, 42 insertions(+), 15 deletions(-)
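
An illustrative sketch (not part of this patch) of how a device emulation
model would plug into the new interface; the "vfoo" device, VFOO_BASE and
domain_vfoo_init() below are hypothetical names used only for the example:

#define VFOO_BASE  0x1c0f0000UL              /* hypothetical device base */

static int vfoo_mmio_check(struct vcpu *v, paddr_t addr)
{
    /* Claim one page of guest physical address space for the device. */
    return ( addr >= VFOO_BASE && addr < VFOO_BASE + PAGE_SIZE );
}

static int vfoo_mmio_read(struct vcpu *v, mmio_info_t *info)
{
    /* Decode info->gpa / info->dabt and emulate the register read here. */
    return 1;
}

static int vfoo_mmio_write(struct vcpu *v, mmio_info_t *info)
{
    /* Emulate the register write here. */
    return 1;
}

static struct mmio_handler vfoo_mmio_handler = {
    .check_handler = vfoo_mmio_check,
    .read_handler  = vfoo_mmio_read,
    .write_handler = vfoo_mmio_write,
};

static int domain_vfoo_init(struct domain *d)
{
    /* Add the handler to the per-domain list walked by handle_mmio(). */
    register_mmio_handler(d, &vfoo_mmio_handler);
    return 0;
}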

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index f26b77b..e2f0cc4 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -34,6 +34,7 @@
 #include <asm/platform.h>
 #include "vtimer.h"
 #include "vuart.h"
+#include "io.h"
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 
@@ -521,6 +522,9 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
     share_xen_page_with_guest(
         virt_to_page(d->shared_info), d, XENSHARE_writable);
 
+    if ( (d->arch.io_handlers = xzalloc(struct io_handler)) == NULL )
+        goto fail;
+
     if ( (rc = p2m_alloc_table(d)) != 0 )
         goto fail;
 
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index a6db00b..d6231d0 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -17,31 +17,43 @@
  */
 
 #include <xen/config.h>
+#include <xen/init.h>
+#include <xen/kernel.h>
 #include <xen/lib.h>
+#include <xen/spinlock.h>
 #include <asm/current.h>
+#include <xen/sched.h>
 
 #include "io.h"
 
-static const struct mmio_handler *const mmio_handlers[] =
-{
-    &vgic_distr_mmio_handler,
-    &vuart_mmio_handler,
-};
-#define MMIO_HANDLER_NR ARRAY_SIZE(mmio_handlers)
+static DEFINE_SPINLOCK(handler_lock);
 
 int handle_mmio(mmio_info_t *info)
 {
     struct vcpu *v = current;
     int i;
+    struct io_handler *mmio_handle = v->domain->arch.io_handlers;
 
-    for ( i = 0; i < MMIO_HANDLER_NR; i++ )
-        if ( mmio_handlers[i]->check_handler(v, info->gpa) )
+    for ( i = 0; i < mmio_handle->num_entries; i++ )
+    {
+        if ( mmio_handle->mmio_handlers[i]->check_handler(v, info->gpa) )
             return info->dabt.write ?
-                mmio_handlers[i]->write_handler(v, info) :
-                mmio_handlers[i]->read_handler(v, info);
-
+                mmio_handle->mmio_handlers[i]->write_handler(v, info) :
+                mmio_handle->mmio_handlers[i]->read_handler(v, info);
+    }
     return 0;
 }
+
+void register_mmio_handler(struct domain *d, struct mmio_handler *handle)
+{
+    unsigned long flags;
+    struct io_handler *handler = d->arch.io_handlers;
+
+    spin_lock_irqsave(&handler_lock, flags);
+    BUG_ON(handler->num_entries >= MAX_IO_HANDLER);
+    handler->mmio_handlers[handler->num_entries++] = handle;
+    spin_unlock_irqrestore(&handler_lock, flags);
+}
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/io.h b/xen/arch/arm/io.h
index 8d252c0..5fc1660 100644
--- a/xen/arch/arm/io.h
+++ b/xen/arch/arm/io.h
@@ -40,10 +40,14 @@ struct mmio_handler {
     mmio_write_t write_handler;
 };
 
-extern const struct mmio_handler vgic_distr_mmio_handler;
-extern const struct mmio_handler vuart_mmio_handler;
+#define MAX_IO_HANDLER  16
+struct io_handler {
+    int num_entries;
+    struct mmio_handler *mmio_handlers[MAX_IO_HANDLER];
+};
 
 extern int handle_mmio(mmio_info_t *info);
+void register_mmio_handler(struct domain *d, struct mmio_handler *handle);
 
 #endif
 
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 8616534..77b561e 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -35,6 +35,7 @@
 /* Number of ranks of interrupt registers for a domain */
 #define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_lines+31)/32)
 
+static struct mmio_handler vgic_distr_mmio_handler;
 /*
  * Rank containing GICD_<FOO><n> for GICD_<FOO> with
  * <b>-bits-per-interrupt
@@ -107,6 +108,7 @@ int domain_vgic_init(struct domain *d)
     }
     for (i=0; i<DOMAIN_NR_RANKS(d); i++)
         spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
+    register_mmio_handler(d, &vgic_distr_mmio_handler);
     return 0;
 }
 
@@ -673,7 +675,7 @@ static int vgic_distr_mmio_check(struct vcpu *v, paddr_t addr)
     return (addr >= (d->arch.vgic.dbase)) && (addr < (d->arch.vgic.dbase + PAGE_SIZE));
 }
 
-const struct mmio_handler vgic_distr_mmio_handler = {
+static struct mmio_handler vgic_distr_mmio_handler = {
     .check_handler = vgic_distr_mmio_check,
     .read_handler  = vgic_distr_mmio_read,
     .write_handler = vgic_distr_mmio_write,
diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c
index b9d3ced..96df6b3 100644
--- a/xen/arch/arm/vuart.c
+++ b/xen/arch/arm/vuart.c
@@ -44,6 +44,8 @@
 
 #define domain_has_vuart(d) ((d)->arch.vuart.info != NULL)
 
+static struct mmio_handler vuart_mmio_handler;
+
 int domain_vuart_init(struct domain *d)
 {
     ASSERT( !d->domain_id );
@@ -59,6 +61,8 @@ int domain_vuart_init(struct domain *d)
     if ( !d->arch.vuart.buf )
         return -ENOMEM;
 
+    register_mmio_handler(d, &vuart_mmio_handler);
+
     return 0;
 }
 
@@ -133,7 +137,7 @@ static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info)
     return 1;
 }
 
-const struct mmio_handler vuart_mmio_handler = {
+static struct mmio_handler vuart_mmio_handler = {
     .check_handler = vuart_mmio_check,
     .read_handler  = vuart_mmio_read,
     .write_handler = vuart_mmio_write,
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 50b9b54..23dac85 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -116,6 +116,7 @@ struct arch_domain
     struct hvm_domain hvm_domain;
     xen_pfn_t *grant_table_gpfn;
 
+    struct io_handler *io_handlers;
     /* Continuable domain_relinquish_resources(). */
     enum {
         RELMEM_not_started,
-- 
1.7.9.5

