
[Xen-devel] [PATCH v3 2/4] x86/hvm: take a reference on ioreq server emulating domain



When an ioreq server is created, the code currently stores the id
of the emulating domain, but does not take a reference on that domain.

This patch modifies the code to hold a reference for the lifetime of the
ioreq server.

NOTE: ioreq servers are either destroyed explicitly or destroyed implicitly
      in the context of XEN_DOMCTL_destroydomain.
      If the emulating domain is shut down prior to the target then any
      domain reference held by an ioreq server will prevent it from being
      fully destroyed. In that case, however, it is likely that the
      target's vcpus will block fairly quickly, waiting for emulation that
      will never occur, and when the target domain is destroyed the
      reference on the zombie emulating domain will be dropped, allowing
      both to be cleaned up.
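
For illustration only (not part of the patch): the change applies the usual
pattern of taking a reference when the ioreq server is initialized and
dropping it both on the error path and at teardown. The stand-alone sketch
below shows that get/put pairing using toy stand-ins (toy_domain, toy_get(),
toy_put(), toy_ioreq_server) rather than Xen's real types and
get_knownalive_domain()/put_domain():

    #include <assert.h>
    #include <stdio.h>

    /* Toy stand-in for a refcounted domain. */
    struct toy_domain {
        int refcnt;
    };

    /* Stand-ins for get_knownalive_domain()/put_domain(). */
    static void toy_get(struct toy_domain *d) { d->refcnt++; }
    static void toy_put(struct toy_domain *d) { d->refcnt--; }

    struct toy_ioreq_server {
        struct toy_domain *target;    /* domain being emulated */
        struct toy_domain *emulator;  /* domain doing the emulation */
    };

    /* Init takes a reference on the emulator; any failure path drops it. */
    static int server_init(struct toy_ioreq_server *s,
                           struct toy_domain *target,
                           struct toy_domain *emulator,
                           int fail)
    {
        s->target = target;

        toy_get(emulator);
        s->emulator = emulator;

        if ( fail )
        {
            toy_put(s->emulator);   /* mirrors the fail_map path below */
            return -1;
        }

        return 0;
    }

    /* Deinit drops the reference taken at init. */
    static void server_deinit(struct toy_ioreq_server *s)
    {
        toy_put(s->emulator);
    }

    int main(void)
    {
        struct toy_domain target = { 1 }, emulator = { 1 };
        struct toy_ioreq_server s;

        if ( server_init(&s, &target, &emulator, 0) == 0 )
            server_deinit(&s);

        assert(emulator.refcnt == 1);   /* get/put balanced */
        printf("emulator refcnt back to %d\n", emulator.refcnt);

        return 0;
    }

The fail_map error path in hvm_ioreq_server_init() and the put_domain() in
hvm_ioreq_server_deinit() in the diff below pair up against the
get_knownalive_domain() taken in hvm_ioreq_server_init() in just this way.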

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

v3:
 - Minor code tweaks requested by Jan
 - Expanded commit comment to explain how domain references will be
   released
---
 xen/arch/x86/hvm/ioreq.c         | 31 +++++++++++++++++++------------
 xen/include/asm-x86/hvm/domain.h |  4 +---
 2 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 2b9e5562dd..154f6f1a32 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -218,7 +218,7 @@ static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
 static int hvm_map_ioreq_page(
     struct hvm_ioreq_server *s, bool buf, unsigned long gfn)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
     struct page_info *page;
     void *va;
@@ -315,8 +315,8 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
 
     spin_lock(&s->lock);
 
-    rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id, s->domid,
-                                         NULL);
+    rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id,
+                                         s->emulator->domain_id, NULL);
     if ( rc < 0 )
         goto fail2;
 
@@ -324,9 +324,10 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
 
     if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
     {
-        struct domain *d = s->domain;
+        struct domain *d = s->target;
 
-        rc = alloc_unbound_xen_event_channel(v->domain, 0, s->domid, NULL);
+        rc = alloc_unbound_xen_event_channel(v->domain, 0,
+                                             s->emulator->domain_id, NULL);
         if ( rc < 0 )
             goto fail3;
 
@@ -434,7 +435,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
                                         bool is_default,
                                         bool handle_bufioreq)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
     unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
     int rc;
@@ -471,7 +472,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
 static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
                                          bool is_default)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     bool handle_bufioreq = !!s->bufioreq.va;
 
     if ( handle_bufioreq )
@@ -521,7 +522,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
         if ( rc )
             goto fail;
 
-        s->range[i] = rangeset_new(s->domain, name,
+        s->range[i] = rangeset_new(s->target, name,
                                    RANGESETF_prettyprint_hex);
 
         xfree(name);
@@ -545,7 +546,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
 static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
                                     bool is_default)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     struct hvm_ioreq_vcpu *sv;
     bool handle_bufioreq = !!s->bufioreq.va;
 
@@ -576,7 +577,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
 static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
                                      bool is_default)
 {
-    struct domain *d = s->domain;
+    struct domain *d = s->target;
     bool handle_bufioreq = !!s->bufioreq.va;
 
     spin_lock(&s->lock);
@@ -602,12 +603,15 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
                                  struct domain *d, bool is_default,
                                  int bufioreq_handling, ioservid_t id)
 {
+    struct domain *currd = current->domain;
     struct vcpu *v;
     int rc;
 
     s->id = id;
-    s->domain = d;
-    s->domid = current->domain->domain_id;
+    s->target = d;
+
+    get_knownalive_domain(currd);
+    s->emulator = currd;
 
     spin_lock_init(&s->lock);
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
@@ -641,6 +645,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
  fail_map:
     hvm_ioreq_server_free_rangesets(s, is_default);
 
+    put_domain(s->emulator);
     return rc;
 }
 
@@ -651,6 +656,8 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
     hvm_ioreq_server_remove_all_vcpus(s);
     hvm_ioreq_server_unmap_pages(s, is_default);
     hvm_ioreq_server_free_rangesets(s, is_default);
+
+    put_domain(s->emulator);
 }
 
 static ioservid_t next_ioservid(struct domain *d)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 7f128c05ff..6e03d024c8 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -53,13 +53,11 @@ struct hvm_ioreq_vcpu {
 
 struct hvm_ioreq_server {
     struct list_head       list_entry;
-    struct domain          *domain;
+    struct domain          *target, *emulator;
 
     /* Lock to serialize toolstack modifications */
     spinlock_t             lock;
 
-    /* Domain id of emulating domain */
-    domid_t                domid;
     ioservid_t             id;
     struct hvm_ioreq_page  ioreq;
     struct list_head       ioreq_vcpu_list;
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

