
[Xen-devel] [PATCH v8 08/11] xen: arch-specific hooks for domain_soft_reset()



The x86-specific hook cleans up the pirq-emuirq mappings, destroys all
ioreq servers, and replaces the shared_info frame with an empty page to
support a subsequent XENMAPSPACE_shared_info call.
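
For context (not part of this patch): after kexec the new guest kernel is
expected to re-register the shared_info frame itself via
XENMAPSPACE_shared_info. A minimal guest-side sketch of that call is below;
the Linux-style include paths, the HYPERVISOR_memory_op() wrapper and the
chosen gpfn are assumptions about the guest environment, not something this
series introduces:

    #include <xen/interface/xen.h>      /* DOMID_SELF */
    #include <xen/interface/memory.h>   /* XENMEM_add_to_physmap, XENMAPSPACE_shared_info */
    #include <asm/xen/hypercall.h>      /* HYPERVISOR_memory_op() (Linux guest) */

    /* Ask the hypervisor to map the shared_info frame at guest pfn 'gpfn'. */
    static int remap_shared_info(unsigned long gpfn)
    {
        struct xen_add_to_physmap xatp = {
            .domid = DOMID_SELF,
            .space = XENMAPSPACE_shared_info,
            .idx   = 0,      /* shared_info is a single frame */
            .gpfn  = gpfn,   /* guest-chosen location */
        };

        return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
    }

Without the replacement done by this hook, such a call after soft reset would
leave a hole at the frame's old location, as noted in the code comment below.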

The ARM-specific hook is a no-op for now.

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
Changes since 'PATCH RFC' of the 'reset everything' approach to PVHVM
guest kexec:
- Coding style, check get_gfn_query() return value, various minor fixes
  [Jan Beulich]
- Do not unpause VCPUs on arch hook failure [Jan Beulich]
---
 xen/arch/arm/domain.c         |  5 +++
 xen/arch/x86/domain.c         | 79 +++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/hvm.c        |  2 +-
 xen/common/domain.c           |  5 +++
 xen/include/asm-x86/hvm/hvm.h |  2 ++
 xen/include/xen/domain.h      |  2 ++
 6 files changed, 94 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 24b8938..c112afa 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -653,6 +653,11 @@ void arch_domain_unpause(struct domain *d)
 {
 }
 
+int arch_domain_soft_reset(struct domain *d)
+{
+    return 0;
+}
+
 static int is_guest_pv32_psr(uint32_t psr)
 {
     switch (psr & PSR_MODE_MASK)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 0363650..2dd0e0d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -682,6 +682,85 @@ void arch_domain_unpause(struct domain *d)
         viridian_time_ref_count_thaw(d);
 }
 
+int arch_domain_soft_reset(struct domain *d)
+{
+    struct page_info *page = virt_to_page(d->shared_info), *new_page;
+    int ret = 0;
+    struct domain *owner;
+    unsigned long mfn, mfn_new, gfn;
+    p2m_type_t p2mt;
+    unsigned int i;
+
+    /* Soft reset is supported for HVM domains only */
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    hvm_destroy_all_ioreq_servers(d);
+
+    spin_lock(&d->event_lock);
+    for ( i = 1; i < d->nr_pirqs; i++ )
+        if ( domain_pirq_to_emuirq(d, i) != IRQ_UNBOUND )
+        {
+            ret = unmap_domain_pirq_emuirq(d, i);
+            if ( ret )
+                break;
+        }
+    spin_unlock(&d->event_lock);
+
+    if ( ret )
+        return ret;
+
+    /*
+     * shared_info page needs to be replaced with a new page, otherwise we
+     * will get a hole if the domain does XENMAPSPACE_shared_info
+     */
+
+    owner = page_get_owner_and_reference(page);
+    /* If the shared_info page was never mapped, we do not need to replace it */
+    if ( owner != d )
+        goto exit_put_page;
+
+    mfn = page_to_mfn(page);
+    if ( !mfn_valid(mfn) )
+    {
+        printk(XENLOG_G_ERR "Dom%d's shared_info page points to invalid MFN\n",
+               d->domain_id);
+        ret = -EINVAL;
+        goto exit_put_page;
+    }
+
+    gfn = mfn_to_gmfn(d, mfn);
+    if ( mfn_x(get_gfn_query(d, gfn, &p2mt)) == INVALID_MFN )
+    {
+        printk(XENLOG_G_ERR "Failed to get Dom%d's shared_info GFN (%lx)\n",
+               d->domain_id, gfn);
+        ret = -EINVAL;
+        goto exit_put_page;
+    }
+
+    new_page = alloc_domheap_page(d, 0);
+    if ( !new_page )
+    {
+        printk(XENLOG_G_ERR "Failed to alloc a page to replace"
+               " Dom%d's shared_info frame %lx\n", d->domain_id, gfn);
+        ret = -ENOMEM;
+        goto exit_put_gfn;
+    }
+    mfn_new = page_to_mfn(new_page);
+    guest_physmap_remove_page(d, gfn, mfn, 0);
+
+    ret = guest_physmap_add_page(d, gfn, mfn_new, 0);
+    if ( ret )
+        printk(XENLOG_G_ERR "Failed to add a page to replace"
+               " Dom%d's shared_info frame %lx\n", d->domain_id, gfn);
+ exit_put_gfn:
+    put_gfn(d, gfn);
+ exit_put_page:
+    put_page(page);
+
+    return ret;
+}
+
 unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
 {
     unsigned long hv_cr4_mask, hv_cr4 = real_cr4_to_pv_guest_cr4(read_cr4());
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index d5e5242..506a7be 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1319,7 +1319,7 @@ static void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
     spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
-static void hvm_destroy_all_ioreq_servers(struct domain *d)
+void hvm_destroy_all_ioreq_servers(struct domain *d)
 {
     struct hvm_ioreq_server *s, *next;
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index ade80ff..cd5ed42 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1013,6 +1013,7 @@ int domain_unpause_by_systemcontroller(struct domain *d)
 int domain_soft_reset(struct domain *d)
 {
     struct vcpu *v;
+    int ret;
 
     for_each_vcpu ( d, v )
         if ( !v->paused_for_shutdown )
@@ -1025,6 +1026,10 @@ int domain_soft_reset(struct domain *d)
     for_each_vcpu ( d, v )
         unmap_vcpu_info(v);
 
+    ret = arch_domain_soft_reset(d);
+    if ( ret )
+        return ret;
+
     domain_resume(d);
 
     return 0;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 77eeac5..ffdc379 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -234,6 +234,8 @@ bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
 void hvm_broadcast_assist_req(ioreq_t *p);
 void hvm_complete_assist_req(ioreq_t *p);
 
+void hvm_destroy_all_ioreq_servers(struct domain *d);
+
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
 int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
 
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 848db8a..a469fe0 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -65,6 +65,8 @@ void arch_domain_shutdown(struct domain *d);
 void arch_domain_pause(struct domain *d);
 void arch_domain_unpause(struct domain *d);
 
+int arch_domain_soft_reset(struct domain *d);
+
 int arch_set_info_guest(struct vcpu *, vcpu_guest_context_u);
 void arch_get_info_guest(struct vcpu *, vcpu_guest_context_u);
 
-- 
2.4.2

