
[Xen-devel] [PATCH v18 01/11] x86/hvm/ioreq: maintain an array of ioreq servers rather than a list



A subsequent patch will remove the current implicit limit on the number of
ioreq servers that can be created, which stems from the allocation of gfns
for the ioreq structures and the buffered ioreq ring.

It will therefore be necessary to introduce an explicit limit and, since
this limit should be small, it simplifies the code to maintain an array of
that size rather than using a list.
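
For illustration only (this is not part of the patch), the array-plus-iterator
scheme amounts to something like the minimal C sketch below.
MAX_NR_IOREQ_SERVERS and the backwards-iterating FOR_EACH_IOREQ_SERVER()
mirror the definitions added in the diff; the table wrapper and the stub
type are hypothetical:

/*
 * Illustrative sketch only; Xen's real types are stubbed out. A small,
 * fixed-size table of pointers replaces the former linked list; the array
 * index doubles as the ioreq server id.
 */
#define MAX_NR_IOREQ_SERVERS 8
#define DEFAULT_IOSERVID     0

struct hvm_ioreq_server;                       /* opaque in this sketch */

struct ioreq_server_table {
    struct hvm_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
};

/*
 * Iterate backwards over occupied slots so that secondary servers are
 * visited before the default server in slot 0.
 */
#define FOR_EACH_IOREQ_SERVER(t, id, s) \
    for ( (id) = MAX_NR_IOREQ_SERVERS; (id) != 0; ) \
        if ( !((s) = (t)->server[--(id)]) ) \
            continue; \
        else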

Also, by reserving an array slot for the default server and populating
array slots early during creation, the need to pass an 'is_default' boolean
down to sub-functions is avoided.
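
Again purely as a sketch (pick_ioservid() is a hypothetical helper; the
real logic lives in hvm_create_ioreq_server() in the diff below), the
slot-allocation policy just described looks roughly like this:

#include <errno.h>
#include <stdbool.h>

#define MAX_NR_IOREQ_SERVERS 8
#define DEFAULT_IOSERVID     0

struct hvm_ioreq_server;

/* Pick an array slot for a new server; the slot index doubles as its id. */
static int pick_ioservid(struct hvm_ioreq_server *table[], bool is_default,
                         unsigned int *id)
{
    unsigned int i;

    if ( is_default )
    {
        /* The slot for the default (qemu) server is reserved up front. */
        if ( table[DEFAULT_IOSERVID] )
            return -EEXIST;

        *id = DEFAULT_IOSERVID;
        return 0;
    }

    /* Secondary servers take the first free non-default slot. */
    for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
    {
        if ( i != DEFAULT_IOSERVID && !table[i] )
        {
            *id = i;
            return 0;
        }
    }

    return -ENOSPC;
}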

Some function return values are changed by this patch: specifically, when
the id of the default ioreq server is passed in, -EOPNOTSUPP is now
returned rather than -ENOENT.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

v18:
 - non-trivial re-base.
 - small modification to the FOR_EACH... macro to iterate backwards, to
   maintain a previously undocumented but useful semantic: secondary
   emulators are selected in preference to qemu.
 - dropped R-b's because of change.

v10:
 - modified FOR_EACH... macro as suggested by Jan.
 - check for NULL in IS_DEFAULT macro as suggested by Jan.

v9:
 - modified FOR_EACH... macro as requested by Andrew.

v8:
 - Addressed various comments from Jan.

v7:
 - Fixed assertion failure found in testing.

v6:
 - Updated according to comments made by Roger on v4 that I'd missed.

v5:
 - Switched GET/SET_IOREQ_SERVER() macros to get/set_ioreq_server()
   functions to avoid possible double-evaluation issues.

v4:
 - Introduced more helper macros and relocated them to the top of the
   code.

v3:
 - New patch (replacing "move is_default into struct hvm_ioreq_server") in
   response to review comments.
---
 xen/arch/x86/hvm/ioreq.c         | 539 +++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/domain.h |  11 +-
 2 files changed, 265 insertions(+), 285 deletions(-)

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 44d029499d..d8d4e96a80 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -33,6 +33,37 @@
 
 #include <public/hvm/ioreq.h>
 
+static void set_ioreq_server(struct domain *d, unsigned int id,
+                             struct hvm_ioreq_server *s)
+{
+    ASSERT(id < MAX_NR_IOREQ_SERVERS);
+    ASSERT(!s || !d->arch.hvm_domain.ioreq_server.server[id]);
+
+    d->arch.hvm_domain.ioreq_server.server[id] = s;
+}
+
+#define GET_IOREQ_SERVER(d, id) \
+    (d)->arch.hvm_domain.ioreq_server.server[id]
+
+static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
+                                                 unsigned int id)
+{
+    if ( id >= MAX_NR_IOREQ_SERVERS )
+        return NULL;
+
+    return GET_IOREQ_SERVER(d, id);
+}
+
+#define IS_DEFAULT(s) \
+    ((s) && (s) == GET_IOREQ_SERVER((s)->target, DEFAULT_IOSERVID))
+
+/* Iterate over all possible ioreq servers, default (slot 0) last */
+#define FOR_EACH_IOREQ_SERVER(d, id, s) \
+    for ( (id) = MAX_NR_IOREQ_SERVERS; (id) != 0; ) \
+        if ( !(s = GET_IOREQ_SERVER(d, --(id))) ) \
+            continue; \
+        else
+
 static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
 {
     shared_iopage_t *p = s->ioreq.va;
@@ -47,10 +78,9 @@ bool hvm_io_pending(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct hvm_ioreq_server *s;
+    unsigned int id;
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    FOR_EACH_IOREQ_SERVER(d, id, s)
     {
         struct hvm_ioreq_vcpu *sv;
 
@@ -127,10 +157,9 @@ bool handle_hvm_io_completion(struct vcpu *v)
     struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
     struct hvm_ioreq_server *s;
     enum hvm_io_completion io_completion;
+    unsigned int id;
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    FOR_EACH_IOREQ_SERVER(d, id, s)
     {
         struct hvm_ioreq_vcpu *sv;
 
@@ -243,13 +272,12 @@ static int hvm_map_ioreq_page(
 bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
 {
     const struct hvm_ioreq_server *s;
+    unsigned int id;
     bool found = false;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    FOR_EACH_IOREQ_SERVER(d, id, s)
     {
         if ( (s->ioreq.va && s->ioreq.page == page) ||
              (s->bufioreq.va && s->bufioreq.page == page) )
@@ -302,7 +330,7 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
-                                     bool is_default, struct vcpu *v)
+                                     struct vcpu *v)
 {
     struct hvm_ioreq_vcpu *sv;
     int rc;
@@ -316,7 +344,8 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
     spin_lock(&s->lock);
 
     rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id,
-                                         s->emulator->domain_id, NULL);
+                                         s->emulator->domain_id,
+                                         NULL);
     if ( rc < 0 )
         goto fail2;
 
@@ -332,7 +361,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
             goto fail3;
 
         s->bufioreq_evtchn = rc;
-        if ( is_default )
+        if ( IS_DEFAULT(s) )
             d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
                 s->bufioreq_evtchn;
     }
@@ -432,7 +461,6 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
-                                        bool is_default,
                                         bool handle_bufioreq)
 {
     struct domain *d = s->target;
@@ -440,7 +468,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
     unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
     int rc;
 
-    if ( is_default )
+    if ( IS_DEFAULT(s) )
     {
         /*
          * The default ioreq server must handle buffered ioreqs, for
@@ -469,8 +497,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
     return rc;
 }
 
-static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
-                                         bool is_default)
+static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->target;
     bool handle_bufioreq = !!s->bufioreq.va;
@@ -480,7 +507,7 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
 
     hvm_unmap_ioreq_page(s, false);
 
-    if ( !is_default )
+    if ( !IS_DEFAULT(s) )
     {
         if ( handle_bufioreq )
             hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
@@ -489,12 +516,11 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
     }
 }
 
-static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
-                                            bool is_default)
+static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
 {
     unsigned int i;
 
-    if ( is_default )
+    if ( IS_DEFAULT(s) )
         return;
 
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
@@ -502,19 +528,21 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
-                                            bool is_default)
+                                            ioservid_t id)
 {
     unsigned int i;
     int rc;
 
-    if ( is_default )
+    if ( id == DEFAULT_IOSERVID )
         goto done;
 
+    ASSERT(!IS_DEFAULT(s));
+
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
     {
         char *name;
 
-        rc = asprintf(&name, "ioreq_server %d %s", s->id,
+        rc = asprintf(&name, "ioreq_server %d %s", id,
                       (i == XEN_DMOP_IO_RANGE_PORT) ? "port" :
                       (i == XEN_DMOP_IO_RANGE_MEMORY) ? "memory" :
                       (i == XEN_DMOP_IO_RANGE_PCI) ? "pci" :
@@ -538,13 +566,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
     return 0;
 
  fail:
-    hvm_ioreq_server_free_rangesets(s, false);
+    hvm_ioreq_server_free_rangesets(s);
 
     return rc;
 }
 
-static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
-                                    bool is_default)
+static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->target;
     struct hvm_ioreq_vcpu *sv;
@@ -555,7 +582,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
     if ( s->enabled )
         goto done;
 
-    if ( !is_default )
+    if ( !IS_DEFAULT(s) )
     {
         hvm_remove_ioreq_gfn(d, &s->ioreq);
 
@@ -574,8 +601,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
     spin_unlock(&s->lock);
 }
 
-static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
-                                     bool is_default)
+static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->target;
     bool handle_bufioreq = !!s->bufioreq.va;
@@ -585,7 +611,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
     if ( !s->enabled )
         goto done;
 
-    if ( !is_default )
+    if ( !IS_DEFAULT(s) )
     {
         if ( handle_bufioreq )
             hvm_add_ioreq_gfn(d, &s->bufioreq);
@@ -600,14 +626,13 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
-                                 struct domain *d, bool is_default,
-                                 int bufioreq_handling, ioservid_t id)
+                                 struct domain *d, int bufioreq_handling,
+                                 ioservid_t id)
 {
     struct domain *currd = current->domain;
     struct vcpu *v;
     int rc;
 
-    s->id = id;
     s->target = d;
 
     get_knownalive_domain(currd);
@@ -617,7 +642,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
-    rc = hvm_ioreq_server_alloc_rangesets(s, is_default);
+    rc = hvm_ioreq_server_alloc_rangesets(s, id);
     if ( rc )
         return rc;
 
@@ -625,13 +650,13 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
         s->bufioreq_atomic = true;
 
     rc = hvm_ioreq_server_setup_pages(
-             s, is_default, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
+             s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
     if ( rc )
         goto fail_map;
 
     for_each_vcpu ( d, v )
     {
-        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+        rc = hvm_ioreq_server_add_vcpu(s, v);
         if ( rc )
             goto fail_add;
     }
@@ -640,102 +665,87 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
 
  fail_add:
     hvm_ioreq_server_remove_all_vcpus(s);
-    hvm_ioreq_server_unmap_pages(s, is_default);
+    hvm_ioreq_server_unmap_pages(s);
 
  fail_map:
-    hvm_ioreq_server_free_rangesets(s, is_default);
+    hvm_ioreq_server_free_rangesets(s);
 
     put_domain(s->emulator);
     return rc;
 }
 
-static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
-                                    bool is_default)
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
 {
     ASSERT(!s->enabled);
     hvm_ioreq_server_remove_all_vcpus(s);
-    hvm_ioreq_server_unmap_pages(s, is_default);
-    hvm_ioreq_server_free_rangesets(s, is_default);
+    hvm_ioreq_server_unmap_pages(s);
+    hvm_ioreq_server_free_rangesets(s);
 
     put_domain(s->emulator);
 }
 
-static ioservid_t next_ioservid(struct domain *d)
-{
-    struct hvm_ioreq_server *s;
-    ioservid_t id;
-
-    ASSERT(spin_is_locked(&d->arch.hvm_domain.ioreq_server.lock));
-
-    id = d->arch.hvm_domain.ioreq_server.id;
-
- again:
-    id++;
-
-    /* Check for uniqueness */
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( id == s->id )
-            goto again;
-    }
-
-    d->arch.hvm_domain.ioreq_server.id = id;
-
-    return id;
-}
-
 int hvm_create_ioreq_server(struct domain *d, bool is_default,
                             int bufioreq_handling, ioservid_t *id)
 {
     struct hvm_ioreq_server *s;
+    unsigned int i;
     int rc;
 
     if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
         return -EINVAL;
 
-    rc = -ENOMEM;
     s = xzalloc(struct hvm_ioreq_server);
     if ( !s )
-        goto fail1;
+        return -ENOMEM;
 
     domain_pause(d);
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -EEXIST;
-    if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
-        goto fail2;
-
-    rc = hvm_ioreq_server_init(s, d, is_default, bufioreq_handling,
-                               next_ioservid(d));
-    if ( rc )
-        goto fail3;
-
-    list_add(&s->list_entry,
-             &d->arch.hvm_domain.ioreq_server.list);
-
     if ( is_default )
     {
-        d->arch.hvm_domain.default_ioreq_server = s;
-        hvm_ioreq_server_enable(s, true);
+        i = DEFAULT_IOSERVID;
+
+        rc = -EEXIST;
+        if ( GET_IOREQ_SERVER(d, i) )
+            goto fail;
     }
+    else
+    {
+        for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
+        {
+            if ( i != DEFAULT_IOSERVID && !GET_IOREQ_SERVER(d, i) )
+                break;
+        }
+
+        rc = -ENOSPC;
+        if ( i >= MAX_NR_IOREQ_SERVERS )
+            goto fail;
+    }
+
+    set_ioreq_server(d, i, s);
+
+    rc = hvm_ioreq_server_init(s, d, bufioreq_handling, i);
+    if ( rc )
+        goto fail;
+
+    if ( i == DEFAULT_IOSERVID )
+        hvm_ioreq_server_enable(s);
 
     if ( id )
-        *id = s->id;
+        *id = i;
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
- fail3:
- fail2:
+ fail:
+    set_ioreq_server(d, i, NULL);
+
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
- fail1:
     return rc;
 }
 
@@ -744,41 +754,38 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    if ( id == DEFAULT_IOSERVID )
+        return -EPERM;
 
-    rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-        if ( s->id != id )
-            continue;
+    s = get_ioreq_server(d, id);
 
-        rc = -EPERM;
-        if ( s->emulator != current->domain )
-            break;
+    rc = -ENOENT;
+    if ( !s )
+        goto out;
 
-        domain_pause(d);
+    ASSERT(!IS_DEFAULT(s));
 
-        p2m_set_ioreq_server(d, 0, s);
+    rc = -EPERM;
+    if ( s->emulator != current->domain )
+        goto out;
 
-        hvm_ioreq_server_disable(s, false);
+    domain_pause(d);
 
-        list_del(&s->list_entry);
+    p2m_set_ioreq_server(d, 0, s);
 
-        hvm_ioreq_server_deinit(s, false);
+    hvm_ioreq_server_disable(s);
+    hvm_ioreq_server_deinit(s);
 
-        domain_unpause(d);
+    domain_unpause(d);
 
-        xfree(s);
+    set_ioreq_server(d, id, NULL);
+    xfree(s);
 
-        rc = 0;
-        break;
-    }
+    rc = 0;
 
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
@@ -792,35 +799,34 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( id == DEFAULT_IOSERVID )
+        return -EOPNOTSUPP;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    s = get_ioreq_server(d, id);
 
-        if ( s->id != id )
-            continue;
+    rc = -ENOENT;
+    if ( !s )
+        goto out;
 
-        rc = -EPERM;
-        if ( s->emulator != current->domain )
-            break;
+    ASSERT(!IS_DEFAULT(s));
 
-        *ioreq_gfn = s->ioreq.gfn;
+    rc = -EPERM;
+    if ( s->emulator != current->domain )
+        goto out;
 
-        if ( s->bufioreq.va != NULL )
-        {
-            *bufioreq_gfn = s->bufioreq.gfn;
-            *bufioreq_port = s->bufioreq_evtchn;
-        }
+    *ioreq_gfn = s->ioreq.gfn;
 
-        rc = 0;
-        break;
+    if ( s->bufioreq.va != NULL )
+    {
+        *bufioreq_gfn = s->bufioreq.gfn;
+        *bufioreq_port = s->bufioreq_evtchn;
     }
 
+    rc = 0;
+
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
@@ -831,55 +837,53 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                                      uint64_t end)
 {
     struct hvm_ioreq_server *s;
+    struct rangeset *r;
     int rc;
 
     if ( start > end )
         return -EINVAL;
 
+    if ( id == DEFAULT_IOSERVID )
+        return -EOPNOTSUPP;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        struct rangeset *r;
+    s = get_ioreq_server(d, id);
 
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    rc = -ENOENT;
+    if ( !s )
+        goto out;
 
-        if ( s->id != id )
-            continue;
+    ASSERT(!IS_DEFAULT(s));
 
-        rc = -EPERM;
-        if ( s->emulator != current->domain )
-            break;
+    rc = -EPERM;
+    if ( s->emulator != current->domain )
+        goto out;
 
-        switch ( type )
-        {
-        case XEN_DMOP_IO_RANGE_PORT:
-        case XEN_DMOP_IO_RANGE_MEMORY:
-        case XEN_DMOP_IO_RANGE_PCI:
-            r = s->range[type];
-            break;
+    switch ( type )
+    {
+    case XEN_DMOP_IO_RANGE_PORT:
+    case XEN_DMOP_IO_RANGE_MEMORY:
+    case XEN_DMOP_IO_RANGE_PCI:
+        r = s->range[type];
+        break;
 
-        default:
-            r = NULL;
-            break;
-        }
+    default:
+        r = NULL;
+        break;
+    }
 
-        rc = -EINVAL;
-        if ( !r )
-            break;
+    rc = -EINVAL;
+    if ( !r )
+        goto out;
 
-        rc = -EEXIST;
-        if ( rangeset_overlaps_range(r, start, end) )
-            break;
+    rc = -EEXIST;
+    if ( rangeset_overlaps_range(r, start, end) )
+        goto out;
 
-        rc = rangeset_add_range(r, start, end);
-        break;
-    }
+    rc = rangeset_add_range(r, start, end);
 
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
@@ -890,55 +894,53 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
                                          uint64_t end)
 {
     struct hvm_ioreq_server *s;
+    struct rangeset *r;
     int rc;
 
     if ( start > end )
         return -EINVAL;
 
+    if ( id == DEFAULT_IOSERVID )
+        return -EOPNOTSUPP;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
+    s = get_ioreq_server(d, id);
+
     rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        struct rangeset *r;
+    if ( !s )
+        goto out;
 
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    ASSERT(!IS_DEFAULT(s));
 
-        if ( s->id != id )
-            continue;
+    rc = -EPERM;
+    if ( s->emulator != current->domain )
+        goto out;
 
-        rc = -EPERM;
-        if ( s->emulator != current->domain )
-            break;
+    switch ( type )
+    {
+    case XEN_DMOP_IO_RANGE_PORT:
+    case XEN_DMOP_IO_RANGE_MEMORY:
+    case XEN_DMOP_IO_RANGE_PCI:
+        r = s->range[type];
+        break;
 
-        switch ( type )
-        {
-        case XEN_DMOP_IO_RANGE_PORT:
-        case XEN_DMOP_IO_RANGE_MEMORY:
-        case XEN_DMOP_IO_RANGE_PCI:
-            r = s->range[type];
-            break;
+    default:
+        r = NULL;
+        break;
+    }
 
-        default:
-            r = NULL;
-            break;
-        }
+    rc = -EINVAL;
+    if ( !r )
+        goto out;
 
-        rc = -EINVAL;
-        if ( !r )
-            break;
+    rc = -ENOENT;
+    if ( !rangeset_contains_range(r, start, end) )
+        goto out;
 
-        rc = -ENOENT;
-        if ( !rangeset_contains_range(r, start, end) )
-            break;
-
-        rc = rangeset_remove_range(r, start, end);
-        break;
-    }
+    rc = rangeset_remove_range(r, start, end);
 
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
@@ -958,6 +960,9 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
+    if ( id == DEFAULT_IOSERVID )
+        return -EOPNOTSUPP;
+
     if ( type != HVMMEM_ioreq_server )
         return -EINVAL;
 
@@ -966,25 +971,21 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
+    s = get_ioreq_server(d, id);
+
     rc = -ENOENT;
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
-    {
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    if ( !s )
+        goto out;
 
-        if ( s->id != id )
-            continue;
+    ASSERT(!IS_DEFAULT(s));
 
-        rc = -EPERM;
-        if ( s->emulator != current->domain )
-            break;
+    rc = -EPERM;
+    if ( s->emulator != current->domain )
+        goto out;
 
-        rc = p2m_set_ioreq_server(d, flags, s);
-        break;
-    }
+    rc = p2m_set_ioreq_server(d, flags, s);
 
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     if ( rc == 0 && flags == 0 )
@@ -1001,42 +1002,38 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
 int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
                                bool enabled)
 {
-    struct list_head *entry;
+    struct hvm_ioreq_server *s;
     int rc;
 
+    if ( id == DEFAULT_IOSERVID )
+        return -EOPNOTSUPP;
+
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    rc = -ENOENT;
-    list_for_each ( entry,
-                    &d->arch.hvm_domain.ioreq_server.list )
-    {
-        struct hvm_ioreq_server *s = list_entry(entry,
-                                                struct hvm_ioreq_server,
-                                                list_entry);
+    s = get_ioreq_server(d, id);
 
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
+    rc = -ENOENT;
+    if ( !s )
+        goto out;
 
-        if ( s->id != id )
-            continue;
+    ASSERT(!IS_DEFAULT(s));
 
-        rc = -EPERM;
-        if ( s->emulator != current->domain )
-            break;
+    rc = -EPERM;
+    if ( s->emulator != current->domain )
+        goto out;
 
-        domain_pause(d);
+    domain_pause(d);
 
-        if ( enabled )
-            hvm_ioreq_server_enable(s, false);
-        else
-            hvm_ioreq_server_disable(s, false);
+    if ( enabled )
+        hvm_ioreq_server_enable(s);
+    else
+        hvm_ioreq_server_disable(s);
 
-        domain_unpause(d);
+    domain_unpause(d);
 
-        rc = 0;
-        break;
-    }
+    rc = 0;
 
+ out:
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     return rc;
 }
@@ -1044,17 +1041,14 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
 int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
 {
     struct hvm_ioreq_server *s;
+    unsigned int id;
     int rc;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    FOR_EACH_IOREQ_SERVER(d, id, s)
     {
-        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
-
-        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+        rc = hvm_ioreq_server_add_vcpu(s, v);
         if ( rc )
             goto fail;
     }
@@ -1064,10 +1058,15 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
     return 0;
 
  fail:
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    while ( id-- != 0 )
+    {
+        s = GET_IOREQ_SERVER(d, id);
+
+        if ( !s )
+            continue;
+
         hvm_ioreq_server_remove_vcpu(s, v);
+    }
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
@@ -1077,12 +1076,11 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
 void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
 {
     struct hvm_ioreq_server *s;
+    unsigned int id;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    FOR_EACH_IOREQ_SERVER(d, id, s)
         hvm_ioreq_server_remove_vcpu(s, v);
 
     spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
@@ -1090,28 +1088,19 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
 
 void hvm_destroy_all_ioreq_servers(struct domain *d)
 {
-    struct hvm_ioreq_server *s, *next;
+    struct hvm_ioreq_server *s;
+    unsigned int id;
 
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
-    list_for_each_entry_safe ( s,
-                               next,
-                               &d->arch.hvm_domain.ioreq_server.list,
-                               list_entry )
+    FOR_EACH_IOREQ_SERVER(d, id, s)
     {
-        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
-
-        hvm_ioreq_server_disable(s, is_default);
-
-        if ( is_default )
-            d->arch.hvm_domain.default_ioreq_server = NULL;
-
-        list_del(&s->list_entry);
-
-        hvm_ioreq_server_deinit(s, is_default);
+        hvm_ioreq_server_disable(s);
+        hvm_ioreq_server_deinit(s);
 
+        set_ioreq_server(d, id, NULL);
         xfree(s);
     }
 
@@ -1125,12 +1114,10 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     uint32_t cf8;
     uint8_t type;
     uint64_t addr;
-
-    if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
-        return NULL;
+    unsigned int id;
 
     if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
-        return d->arch.hvm_domain.default_ioreq_server;
+        return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
 
     cf8 = d->arch.hvm_domain.pci_cf8;
 
@@ -1168,16 +1155,11 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         addr = p->addr;
     }
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    FOR_EACH_IOREQ_SERVER(d, id, s)
     {
         struct rangeset *r;
 
-        if ( s == d->arch.hvm_domain.default_ioreq_server )
-            continue;
-
-        if ( !s->enabled )
+        if ( IS_DEFAULT(s) )
             continue;
 
         r = s->range[type];
@@ -1210,7 +1192,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         }
     }
 
-    return d->arch.hvm_domain.default_ioreq_server;
+    return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
 }
 
 static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
@@ -1369,13 +1351,13 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
 {
     struct domain *d = current->domain;
     struct hvm_ioreq_server *s;
-    unsigned int failed = 0;
+    unsigned int id, failed = 0;
 
-    list_for_each_entry ( s,
-                          &d->arch.hvm_domain.ioreq_server.list,
-                          list_entry )
+    FOR_EACH_IOREQ_SERVER(d, id, s)
+    {
         if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
             failed++;
+    }
 
     return failed;
 }
@@ -1395,7 +1377,6 @@ static int hvm_access_cf8(
 void hvm_ioreq_init(struct domain *d)
 {
     spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
-    INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
 
     register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
 }
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 6e03d024c8..2e4d85f6fe 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -52,13 +52,11 @@ struct hvm_ioreq_vcpu {
 #define MAX_NR_IO_RANGES  256
 
 struct hvm_ioreq_server {
-    struct list_head       list_entry;
     struct domain          *target, *emulator;
 
     /* Lock to serialize toolstack modifications */
     spinlock_t             lock;
 
-    ioservid_t             id;
     struct hvm_ioreq_page  ioreq;
     struct list_head       ioreq_vcpu_list;
     struct hvm_ioreq_page  bufioreq;
@@ -98,6 +96,9 @@ struct hvm_pi_ops {
     void (*do_resume)(struct vcpu *v);
 };
 
+#define MAX_NR_IOREQ_SERVERS 8
+#define DEFAULT_IOSERVID 0
+
 struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
     struct {
@@ -107,11 +108,9 @@ struct hvm_domain {
 
     /* Lock protects all other values in the sub-struct and the default */
     struct {
-        spinlock_t       lock;
-        ioservid_t       id;
-        struct list_head list;
+        spinlock_t              lock;
+        struct hvm_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
     } ioreq_server;
-    struct hvm_ioreq_server *default_ioreq_server;
 
     /* Cached CF8 for guest PCI config cycles */
     uint32_t                pci_cf8;
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

