
[Xen-devel] [PATCH] xen/vcpu: Sanitise VCPUOP_initialise call hierarchy



This code is especially tangled.  VCPUOP_initialise calls into
arch_initialise_vcpu(), which calls back into default_initialise_vcpu(), which
is common code.
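
Concretely, the PV path currently bounces between common and arch code roughly
as follows:

  do_vcpu_op(VCPUOP_initialise, ...)        [common/domain.c]
    -> arch_initialise_vcpu()               [arch/{x86,arm}/domain.c]
         -> default_initialise_vcpu()       [common/domain.c]
              -> arch_set_info_guest()      [back into arch code]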

This path is actually dead code on ARM, because VCPUOP_initialise is filtered
out by do_arm_vcpu_op().

The only valid way to start a secondary CPU on ARM is via the PSCI interface.
The same could in principle be said about INIT-SIPI-SIPI for x86 HVM, if HVM
guests hadn't already inherited a paravirt way of starting CPUs.

Either way, it is quite likely that no future architecture implemented in Xen
will want to use a PV interface, as a standardised (v)CPU bringup mechanism
will already exist.

Arrange the code in do_vcpu_op() to allow arch_initialise_vcpu() to be
optional.  Opt in for x86, and opt out for ARM.

Deleting ARM's arch_initialise_vcpu() allows default_initialise_vcpu() to be
folded into its (now) sole x86 caller, which reduces the compiled code volume
in all builds.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
CC: Juergen Gross <jgross@xxxxxxxx>

This is XSA-296 followup, which is why it is only being posted now.  Seeing as
we are fairly early in the 4.13 cycle, I'd request that it be considered, but
it won't be the end of the world if it gets delayed until 4.14.
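
As an aside, for anyone unfamiliar with the "#define foo foo" opt-in idiom used
below: the arch header defines the hook's name to itself, and common code
compiles the call site only when that marker is visible.  A standalone sketch
with toy names (not Xen code, just an illustration of the idiom):

    #include <stdio.h>

    /* "arch" header for an architecture which opts in to the hook: */
    #define arch_init_hook arch_init_hook
    static int arch_init_hook(int x) { return x + 1; }

    /* "common" code, compiled against whichever arch header is in scope. */
    static int common_op(int x)
    {
    #ifdef arch_init_hook
        return arch_init_hook(x);   /* the arch provides the hook */
    #else
        return -38;                 /* -ENOSYS for arches which opted out */
    #endif
    }

    int main(void)
    {
        printf("%d\n", common_op(41)); /* prints 42 when the hook exists */
        return 0;
    }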
---
 xen/arch/arm/domain.c        |  5 -----
 xen/arch/x86/domain.c        | 22 ++++++++++++++++++++--
 xen/common/domain.c          | 26 ++------------------------
 xen/include/asm-x86/domain.h |  3 +++
 xen/include/xen/domain.h     |  3 ---
 5 files changed, 25 insertions(+), 34 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 5380fbb081..ea0ebf282f 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -913,11 +913,6 @@ int arch_set_info_guest(
     return 0;
 }
 
-int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
-{
-    return default_initialise_vcpu(v, arg);
-}
-
 int arch_vcpu_reset(struct vcpu *v)
 {
     vcpu_end_shutdown_deferral(v);
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f1dd86e12e..cfc1b851b9 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1235,11 +1235,11 @@ int arch_set_info_guest(
 
 int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
+    struct domain *d = v->domain;
     int rc;
 
     if ( is_hvm_vcpu(v) )
     {
-        struct domain *d = v->domain;
         struct vcpu_hvm_context ctxt;
 
         if ( copy_from_guest(&ctxt, arg, 1) )
@@ -1250,7 +1250,25 @@ int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
         domain_unlock(d);
     }
     else
-        rc = default_initialise_vcpu(v, arg);
+    {
+        struct vcpu_guest_context *ctxt = alloc_vcpu_guest_context();
+
+        if ( !ctxt )
+            return -ENOMEM;
+
+        if ( copy_from_guest(ctxt, arg, 1) )
+        {
+            rc = -EFAULT;
+            goto free_context;
+        }
+
+        domain_lock(d);
+        rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
+        domain_unlock(d);
+
+    free_context:
+        free_vcpu_guest_context(ctxt);
+    }
 
     return rc;
 }
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 611116c7fc..9659560514 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1382,30 +1382,6 @@ void unmap_vcpu_info(struct vcpu *v)
     put_page_and_type(mfn_to_page(mfn));
 }
 
-int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
-{
-    struct vcpu_guest_context *ctxt;
-    struct domain *d = v->domain;
-    int rc;
-
-    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
-        return -ENOMEM;
-
-    if ( copy_from_guest(ctxt, arg, 1) )
-    {
-        free_vcpu_guest_context(ctxt);
-        return -EFAULT;
-    }
-
-    domain_lock(d);
-    rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
-    domain_unlock(d);
-
-    free_vcpu_guest_context(ctxt);
-
-    return rc;
-}
-
 long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct domain *d = current->domain;
@@ -1417,6 +1393,7 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     switch ( cmd )
     {
+#ifdef arch_initialise_vcpu
     case VCPUOP_initialise:
         if ( v->vcpu_info == &dummy_vcpu_info )
             return -EINVAL;
@@ -1427,6 +1404,7 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
                                                cmd, vcpuid, arg);
 
         break;
+#endif /* arch_initialise_vcpu */
 
     case VCPUOP_up:
 #ifdef CONFIG_X86
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 212303f371..52d9659647 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -650,6 +650,9 @@ void arch_vcpu_regs_init(struct vcpu *v);
 struct vcpu_hvm_context;
 int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx);
 
+#define arch_initialise_vcpu arch_initialise_vcpu
+int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
+
 #ifdef CONFIG_PV
 void pv_inject_event(const struct x86_event *event);
 #else
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 769302057b..807a790648 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -78,9 +78,6 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required);
 int arch_set_info_guest(struct vcpu *, vcpu_guest_context_u);
 void arch_get_info_guest(struct vcpu *, vcpu_guest_context_u);
 
-int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
-int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
-
 int domain_relinquish_resources(struct domain *d);
 
 void dump_pageframe_info(struct domain *d);
-- 
2.11.0

