
[Xen-changelog] [xen master] xen/arm: io: Protect the handlers with a read-write lock



commit e6c0a480eff5a650824a65cca91d3f1646a89f3b
Author:     Julien Grall <julien.grall@xxxxxxx>
AuthorDate: Tue Jun 28 17:34:31 2016 +0100
Commit:     Stefano Stabellini <sstabellini@xxxxxxxxxx>
CommitDate: Mon Jul 11 18:49:56 2016 +0100

    xen/arm: io: Protect the handlers with a read-write lock
    
    Currently, accessing the I/O handlers does not require taking a lock
    because new handlers are always added at the end of the array. In a
    follow-up patch, this array will be sorted to optimize the lookup.
    
    Given that the I/O handlers will rarely be modified, using a spinlock
    would add contention when multiple vCPUs access the emulated MMIO
    regions concurrently. So use a read-write lock to protect the handlers.
    
    Finally, take the opportunity to re-indent domain_io_init correctly.
    
    Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
 xen/arch/arm/io.c          | 47 +++++++++++++++++++++++++++-------------------
 xen/include/asm-arm/mmio.h |  3 ++-
 2 files changed, 30 insertions(+), 20 deletions(-)
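
To illustrate the rationale in the commit message, here is a minimal
standalone sketch of the read-mostly locking pattern the patch adopts. It
uses POSIX rwlocks rather than Xen's rwlock_t so it can be built outside
the hypervisor; find_handler(), register_handler() and MAX_HANDLERS are
placeholder names for this sketch, not Xen symbols.

    #include <pthread.h>
    #include <stdint.h>
    #include <stddef.h>

    #define MAX_HANDLERS 16

    struct handler {
        uint64_t addr, size;
    };

    static struct handler handlers[MAX_HANDLERS];
    static unsigned int num_handlers;
    static pthread_rwlock_t handlers_lock = PTHREAD_RWLOCK_INITIALIZER;

    /*
     * Read side (hot path, one call per trapped MMIO access): multiple
     * vCPUs can search the array concurrently because they only take the
     * lock for reading. Entries are append-only, so the returned pointer
     * stays valid after the lock is dropped.
     */
    static const struct handler *find_handler(uint64_t gpa)
    {
        const struct handler *h = NULL;
        unsigned int i;

        pthread_rwlock_rdlock(&handlers_lock);

        for ( i = 0; i < num_handlers; i++ )
        {
            if ( gpa >= handlers[i].addr &&
                 gpa < (handlers[i].addr + handlers[i].size) )
            {
                h = &handlers[i];
                break;
            }
        }

        pthread_rwlock_unlock(&handlers_lock);

        return h;
    }

    /*
     * Write side (rare, at registration time): the write lock excludes
     * readers, so a reader can never observe the new entry half-published,
     * and no explicit barrier (such as the dsb(ish) removed by this patch)
     * is needed before bumping the count.
     */
    static int register_handler(uint64_t addr, uint64_t size)
    {
        int rc = -1;

        pthread_rwlock_wrlock(&handlers_lock);

        if ( num_handlers < MAX_HANDLERS )
        {
            handlers[num_handlers].addr = addr;
            handlers[num_handlers].size = size;
            num_handlers++;
            rc = 0;
        }

        pthread_rwlock_unlock(&handlers_lock);

        return rc;
    }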

diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index 0156755..5a96836 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -70,23 +70,39 @@ static int handle_write(const struct mmio_handler *handler, struct vcpu *v,
                                handler->priv);
 }
 
-int handle_mmio(mmio_info_t *info)
+static const struct mmio_handler *find_mmio_handler(struct domain *d,
+                                                    paddr_t gpa)
 {
-    struct vcpu *v = current;
-    int i;
-    const struct mmio_handler *handler = NULL;
-    const struct vmmio *vmmio = &v->domain->arch.vmmio;
+    const struct mmio_handler *handler;
+    unsigned int i;
+    struct vmmio *vmmio = &d->arch.vmmio;
+
+    read_lock(&vmmio->lock);
 
     for ( i = 0; i < vmmio->num_entries; i++ )
     {
         handler = &vmmio->handlers[i];
 
-        if ( (info->gpa >= handler->addr) &&
-             (info->gpa < (handler->addr + handler->size)) )
+        if ( (gpa >= handler->addr) &&
+             (gpa < (handler->addr + handler->size)) )
             break;
     }
 
     if ( i == vmmio->num_entries )
+        handler = NULL;
+
+    read_unlock(&vmmio->lock);
+
+    return handler;
+}
+
+int handle_mmio(mmio_info_t *info)
+{
+    struct vcpu *v = current;
+    const struct mmio_handler *handler = NULL;
+
+    handler = find_mmio_handler(v->domain, info->gpa);
+    if ( !handler )
         return 0;
 
     if ( info->dabt.write )
@@ -104,7 +120,7 @@ void register_mmio_handler(struct domain *d,
 
     BUG_ON(vmmio->num_entries >= MAX_IO_HANDLER);
 
-    spin_lock(&vmmio->lock);
+    write_lock(&vmmio->lock);
 
     handler = &vmmio->handlers[vmmio->num_entries];
 
@@ -113,24 +129,17 @@ void register_mmio_handler(struct domain *d,
     handler->size = size;
     handler->priv = priv;
 
-    /*
-     * handle_mmio is not using the lock to avoid contention.
-     * Make sure the other processors see the new handler before
-     * updating the number of entries
-     */
-    dsb(ish);
-
     vmmio->num_entries++;
 
-    spin_unlock(&vmmio->lock);
+    write_unlock(&vmmio->lock);
 }
 
 int domain_io_init(struct domain *d)
 {
-   spin_lock_init(&d->arch.vmmio.lock);
-   d->arch.vmmio.num_entries = 0;
+    rwlock_init(&d->arch.vmmio.lock);
+    d->arch.vmmio.num_entries = 0;
 
-   return 0;
+    return 0;
 }
 
 /*
diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h
index da1cc2e..32f10f2 100644
--- a/xen/include/asm-arm/mmio.h
+++ b/xen/include/asm-arm/mmio.h
@@ -20,6 +20,7 @@
 #define __ASM_ARM_MMIO_H__
 
 #include <xen/lib.h>
+#include <xen/rwlock.h>
 #include <asm/processor.h>
 #include <asm/regs.h>
 
@@ -51,7 +52,7 @@ struct mmio_handler {
 
 struct vmmio {
     int num_entries;
-    spinlock_t lock;
+    rwlock_t lock;
     struct mmio_handler handlers[MAX_IO_HANDLER];
 };
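
As a usage sketch (not part of this patch): a device emulation registers
its MMIO range once, typically at domain build time, and from then on every
trapped access from any vCPU only takes vmmio->lock for reading. The
callback prototypes and the register_mmio_handler() argument order below
are assumptions based on the surrounding code rather than on hunks shown
here, and the foo_* names are hypothetical.

    #include <asm/mmio.h>

    /* Hypothetical read handler: returns zero for every access. */
    static int foo_mmio_read(struct vcpu *v, mmio_info_t *info,
                             register_t *r, void *priv)
    {
        *r = 0;
        return 1;   /* access handled */
    }

    /* Hypothetical write handler: silently ignores all writes. */
    static int foo_mmio_write(struct vcpu *v, mmio_info_t *info,
                              register_t r, void *priv)
    {
        return 1;   /* access handled */
    }

    static const struct mmio_handler_ops foo_mmio_ops = {
        .read  = foo_mmio_read,
        .write = foo_mmio_write,
    };

    /*
     * At domain build time (FOO_BASE/FOO_SIZE are made-up constants);
     * register_mmio_handler() takes vmmio->lock for writing internally.
     */
    register_mmio_handler(d, &foo_mmio_ops, FOO_BASE, FOO_SIZE, NULL);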
 
--
generated by git-patchbot for /home/xen/git/xen.git#master
