[Xen-changelog] [xen master] ioreq-server: on-demand creation of ioreq server
commit a0731ccab237495919e1bde33fede65b94ec201a
Author: Paul Durrant <paul.durrant@xxxxxxxxxx>
AuthorDate: Mon May 12 12:03:19 2014 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon May 12 12:03:19 2014 +0200
ioreq-server: on-demand creation of ioreq server
This patch creates the ioreq server only when the legacy HVM parameters
are read (by an emulator).

A lock is introduced to protect access to the ioreq server should
multiple emulator/tool invocations arise. The guest is protected
because the ioreq server is only created whilst the domain is paused.
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
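The new hvm_create_ioreq_server() below follows a lock-guarded
lazy-initialisation shape: allocate outside the lock, re-check for a
racing creator under the lock, then publish. A minimal standalone
sketch of that shape, with illustrative names (my_state,
create_server_once) that are not Xen APIs:

/* Sketch of the lock-guarded lazy-creation pattern; illustrative only. */
#include <pthread.h>
#include <stdlib.h>

struct my_server { int id; };

struct my_state {
    pthread_mutex_t lock;      /* plays the role of ioreq_server_lock */
    struct my_server *server;  /* NULL until created on demand */
};

static int create_server_once(struct my_state *st, int id)
{
    struct my_server *s;
    int rc;

    rc = -1;                      /* stands in for -ENOMEM */
    s = calloc(1, sizeof(*s));    /* allocate outside the lock */
    if ( !s )
        goto fail1;

    pthread_mutex_lock(&st->lock);

    rc = 1;                       /* stands in for -EEXIST */
    if ( st->server != NULL )     /* a racing caller got there first */
        goto fail2;

    s->id = id;
    st->server = s;               /* publish under the lock */

    pthread_mutex_unlock(&st->lock);
    return 0;

 fail2:
    pthread_mutex_unlock(&st->lock);
    free(s);
 fail1:
    return rc;
}

The domain_pause()/domain_unpause() bracket in the real function has no
analogue in this sketch; it is what prevents the guest from running
against a half-constructed server.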
xen/arch/x86/hvm/hvm.c | 226 ++++++++++++++++++++++++++++----------
xen/include/asm-x86/hvm/domain.h | 1 +
2 files changed, 168 insertions(+), 59 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ca7594d..f2bd84b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -652,13 +652,66 @@ static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
spin_unlock(&s->lock);
}
-static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
+static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
{
- struct hvm_ioreq_server *s;
+ struct hvm_ioreq_vcpu *sv, *next;
- s = xzalloc(struct hvm_ioreq_server);
- if ( !s )
- return -ENOMEM;
+ spin_lock(&s->lock);
+
+ list_for_each_entry_safe ( sv,
+ next,
+ &s->ioreq_vcpu_list,
+ list_entry )
+ {
+ struct vcpu *v = sv->vcpu;
+
+ list_del(&sv->list_entry);
+
+ if ( v->vcpu_id == 0 )
+ free_xen_event_channel(v, s->bufioreq_evtchn);
+
+ free_xen_event_channel(v, sv->ioreq_evtchn);
+
+ xfree(sv);
+ }
+
+ spin_unlock(&s->lock);
+}
+
+static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
+{
+ struct domain *d = s->domain;
+ unsigned long pfn;
+ int rc;
+
+ pfn = d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN];
+ rc = hvm_map_ioreq_page(s, 0, pfn);
+ if ( rc )
+ return rc;
+
+ pfn = d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN];
+ rc = hvm_map_ioreq_page(s, 1, pfn);
+ if ( rc )
+ goto fail;
+
+ return 0;
+
+fail:
+ hvm_unmap_ioreq_page(s, 0);
+ return rc;
+}
+
+static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
+{
+ hvm_unmap_ioreq_page(s, 1);
+ hvm_unmap_ioreq_page(s, 0);
+}
+
+static int hvm_ioreq_server_init(struct hvm_ioreq_server *s, struct domain *d,
+ domid_t domid)
+{
+ struct vcpu *v;
+ int rc;
s->domain = d;
s->domid = domid;
@@ -667,59 +720,89 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
INIT_LIST_HEAD(&s->ioreq_vcpu_list);
spin_lock_init(&s->bufioreq_lock);
- /*
- * The domain needs to wait until HVM_PARAM_IOREQ_PFN and
- * HVM_PARAM_BUFIOREQ_PFN are both set.
- */
- domain_pause(d);
- domain_pause(d);
+ rc = hvm_ioreq_server_map_pages(s);
+ if ( rc )
+ return rc;
+
+ for_each_vcpu ( d, v )
+ {
+ rc = hvm_ioreq_server_add_vcpu(s, v);
+ if ( rc )
+ goto fail;
+ }
- d->arch.hvm_domain.ioreq_server = s;
return 0;
-}
-static void hvm_destroy_ioreq_server(struct domain *d)
-{
- struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+ fail:
+ hvm_ioreq_server_remove_all_vcpus(s);
+ hvm_ioreq_server_unmap_pages(s);
- hvm_unmap_ioreq_page(s, 1);
- hvm_unmap_ioreq_page(s, 0);
+ return rc;
+}
- xfree(s);
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
+{
+ hvm_ioreq_server_remove_all_vcpus(s);
+ hvm_ioreq_server_unmap_pages(s);
}
-static int hvm_set_ioreq_pfn(struct domain *d, bool_t buf,
- unsigned long pfn)
+static int hvm_create_ioreq_server(struct domain *d, domid_t domid)
{
- struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+ struct hvm_ioreq_server *s;
int rc;
- spin_lock(&s->lock);
+ rc = -ENOMEM;
+ s = xzalloc(struct hvm_ioreq_server);
+ if ( !s )
+ goto fail1;
- rc = hvm_map_ioreq_page(s, buf, pfn);
- if ( rc )
- goto fail;
+ domain_pause(d);
+ spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
- if ( !buf )
- {
- struct hvm_ioreq_vcpu *sv;
+ rc = -EEXIST;
+ if ( d->arch.hvm_domain.ioreq_server != NULL )
+ goto fail2;
- list_for_each_entry ( sv,
- &s->ioreq_vcpu_list,
- list_entry )
- hvm_update_ioreq_evtchn(s, sv);
- }
+ rc = hvm_ioreq_server_init(s, d, domid);
+ if ( rc )
+ goto fail2;
- spin_unlock(&s->lock);
- domain_unpause(d); /* domain_pause() in hvm_create_ioreq_server() */
+ d->arch.hvm_domain.ioreq_server = s;
+
+ spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+ domain_unpause(d);
return 0;
- fail:
- spin_unlock(&s->lock);
+ fail2:
+ spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+ domain_unpause(d);
+
+ xfree(s);
+ fail1:
return rc;
}
+static void hvm_destroy_ioreq_server(struct domain *d)
+{
+ struct hvm_ioreq_server *s;
+
+ domain_pause(d);
+ spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+ s = d->arch.hvm_domain.ioreq_server;
+ if ( s )
+ {
+ d->arch.hvm_domain.ioreq_server = NULL;
+ hvm_ioreq_server_deinit(s);
+ }
+
+ spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+ domain_unpause(d);
+
+ xfree(s);
+}
+
static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
evtchn_port_t *p_port)
{
@@ -737,9 +820,20 @@ static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
static int hvm_set_dm_domain(struct domain *d, domid_t domid)
{
- struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+ struct hvm_ioreq_server *s;
int rc = 0;
+ spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+ /*
+ * Lack of ioreq server is not a failure. HVM_PARAM_DM_DOMAIN will
+ * still be set and thus, when the server is created, it will have
+ * the correct domid.
+ */
+ s = d->arch.hvm_domain.ioreq_server;
+ if ( !s )
+ goto done;
+
domain_pause(d);
spin_lock(&s->lock);
@@ -776,12 +870,13 @@ static int hvm_set_dm_domain(struct domain *d, domid_t domid)
spin_unlock(&s->lock);
domain_unpause(d);
+ done:
+ spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
return rc;
}
int hvm_domain_initialise(struct domain *d)
{
- domid_t domid;
int rc;
if ( !hvm_enabled )
@@ -807,6 +902,7 @@ int hvm_domain_initialise(struct domain *d)
}
+ spin_lock_init(&d->arch.hvm_domain.ioreq_server_lock);
spin_lock_init(&d->arch.hvm_domain.irq_lock);
spin_lock_init(&d->arch.hvm_domain.uc_lock);
@@ -847,21 +943,14 @@ int hvm_domain_initialise(struct domain *d)
rtc_init(d);
- domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
- rc = hvm_create_ioreq_server(d, domid);
- if ( rc != 0 )
- goto fail2;
-
register_portio_handler(d, 0xe9, 1, hvm_print_line);
rc = hvm_funcs.domain_initialise(d);
if ( rc != 0 )
- goto fail3;
+ goto fail2;
return 0;
- fail3:
- hvm_destroy_ioreq_server(d);
fail2:
rtc_deinit(d);
stdvga_deinit(d);
@@ -1518,7 +1607,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
{
int rc;
struct domain *d = v->domain;
- struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+ struct hvm_ioreq_server *s;
hvm_asid_flush_vcpu(v);
@@ -1561,7 +1650,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
&& (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
goto fail5;
- rc = hvm_ioreq_server_add_vcpu(s, v);
+ spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+ s = d->arch.hvm_domain.ioreq_server;
+ if ( s )
+ rc = hvm_ioreq_server_add_vcpu(s, v);
+
+ spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
+
if ( rc != 0 )
goto fail6;
@@ -1598,9 +1694,15 @@ int hvm_vcpu_initialise(struct vcpu *v)
void hvm_vcpu_destroy(struct vcpu *v)
{
struct domain *d = v->domain;
- struct hvm_ioreq_server *s = d->arch.hvm_domain.ioreq_server;
+ struct hvm_ioreq_server *s;
+
+ spin_lock(&d->arch.hvm_domain.ioreq_server_lock);
+
+ s = d->arch.hvm_domain.ioreq_server;
+ if ( s )
+ hvm_ioreq_server_remove_vcpu(s, v);
- hvm_ioreq_server_remove_vcpu(s, v);
+ spin_unlock(&d->arch.hvm_domain.ioreq_server_lock);
nestedhvm_vcpu_destroy(v);
@@ -4421,12 +4523,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
switch ( a.index )
{
- case HVM_PARAM_IOREQ_PFN:
- rc = hvm_set_ioreq_pfn(d, 0, a.value);
- break;
- case HVM_PARAM_BUFIOREQ_PFN:
- rc = hvm_set_ioreq_pfn(d, 1, a.value);
- break;
case HVM_PARAM_CALLBACK_IRQ:
hvm_set_callback_via(d, a.value);
hvm_latch_shinfo_size(d);
@@ -4472,7 +4568,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
domctl_lock_release();
break;
case HVM_PARAM_DM_DOMAIN:
- /* Not reflexive, as we must domain_pause(). */
+ /* Not reflexive, as we may need to domain_pause(). */
rc = -EPERM;
if ( curr_d == d )
break;
@@ -4578,6 +4674,18 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
case HVM_PARAM_ACPI_S_STATE:
a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
break;
+ case HVM_PARAM_IOREQ_PFN:
+ case HVM_PARAM_BUFIOREQ_PFN:
+ case HVM_PARAM_BUFIOREQ_EVTCHN: {
+ domid_t domid;
+
+ /* May need to create server */
+ domid = d->arch.hvm_domain.params[HVM_PARAM_DM_DOMAIN];
+ rc = hvm_create_ioreq_server(d, domid);
+ if ( rc != 0 && rc != -EEXIST )
+ goto param_fail;
+ /*FALLTHRU*/
+ }
default:
a.value = d->arch.hvm_domain.params[a.index];
break;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 92dc5fb..1b0514c 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -64,6 +64,7 @@ struct hvm_ioreq_server {
};
struct hvm_domain {
+ spinlock_t ioreq_server_lock;
struct hvm_ioreq_server *ioreq_server;
struct pl_time pl_time;
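From the tools side, a read of any of the three legacy params now
creates the server on first use, with -EEXIST from later reads absorbed
by the hypervisor. A hypothetical caller, assuming the libxenctrl
interface of this era (xc_get_hvm_param) and an illustrative hard-coded
domid; build with -lxenctrl:

/* Sketch: an emulator's first read of the legacy params triggers
 * on-demand ioreq server creation in the hypervisor.
 */
#include <stdio.h>
#include <xenctrl.h>
#include <xen/hvm/params.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    domid_t domid = 1;          /* illustrative guest domain id */
    unsigned long ioreq_pfn, bufioreq_pfn;

    if ( !xch )
        return 1;

    /* Each get reaches the HVMOP_get_param path above, creating the
     * ioreq server the first time through. */
    if ( xc_get_hvm_param(xch, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn) ||
         xc_get_hvm_param(xch, domid, HVM_PARAM_BUFIOREQ_PFN, &bufioreq_pfn) )
    {
        xc_interface_close(xch);
        return 1;
    }

    printf("ioreq pfn %#lx, buffered ioreq pfn %#lx\n",
           ioreq_pfn, bufioreq_pfn);
    xc_interface_close(xch);
    return 0;
}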
--
generated by git-patchbot for /home/xen/git/xen.git#master