
[Xen-devel] [PATCH v2 02/21] x86/xen: globalize have_vcpu_info_placement



have_vcpu_info_placement applies to both PV and HVM guests, and as we're
going to split the code between them we need to make the variable global.

Rename it to xen_have_vcpu_info_placement.

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 arch/x86/xen/enlighten.c | 12 ++++++------
 arch/x86/xen/xen-ops.h   |  2 ++
 2 files changed, 8 insertions(+), 6 deletions(-)
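
For reference, this follows the usual C pattern for promoting a file-local
flag to a cross-file symbol: drop the static qualifier on the single
definition and declare the variable extern in a shared header, so every
translation unit that includes the header links against the same object.
A minimal user-space sketch of that pattern (xen_ops_demo.h, pv_demo.c,
hvm_demo.c and report_placement() are hypothetical stand-ins, not the
kernel files touched by this patch):

/* xen_ops_demo.h -- stand-in for xen-ops.h: declare the flag once so
 * every file that includes this header sees the same symbol. */
#ifndef XEN_OPS_DEMO_H
#define XEN_OPS_DEMO_H

extern int xen_have_vcpu_info_placement;

void report_placement(const char *who);

#endif

/* pv_demo.c -- stand-in for enlighten.c: the one and only definition,
 * no longer static. */
#include <stdio.h>
#include "xen_ops_demo.h"

int xen_have_vcpu_info_placement = 1;	/* 0: not available, 1: available */

void report_placement(const char *who)
{
	printf("%s: vcpu_info placement %savailable\n",
	       who, xen_have_vcpu_info_placement ? "" : "not ");
}

/* hvm_demo.c -- a second translation unit (think of the future
 * HVM-only file) can now read and clear the flag directly. */
#include "xen_ops_demo.h"

int main(void)
{
	report_placement("hvm");
	xen_have_vcpu_info_placement = 0;	/* e.g. vcpu_info registration failed */
	report_placement("hvm");
	return 0;
}

Built together (e.g. cc pv_demo.c hvm_demo.c), both files resolve to the
single definition; the split PV/HVM code relies on exactly that.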

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 4c1a582..de77be9 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -164,7 +164,7 @@ struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
  *
  * 0: not available, 1: available
  */
-static int have_vcpu_info_placement = 1;
+int xen_have_vcpu_info_placement = 1;
 
 struct tls_descs {
        struct desc_struct desc[3];
@@ -228,7 +228,7 @@ void xen_vcpu_setup(int cpu)
                per_cpu(xen_vcpu, cpu) =
                        &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
 
-       if (!have_vcpu_info_placement) {
+       if (!xen_have_vcpu_info_placement) {
                if (cpu >= MAX_VIRT_CPUS)
                        clamp_max_cpus();
                return;
@@ -251,7 +251,7 @@ void xen_vcpu_setup(int cpu)
 
        if (err) {
                printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
-               have_vcpu_info_placement = 0;
+               xen_have_vcpu_info_placement = 0;
                clamp_max_cpus();
        } else {
                /* This cpu is using the registered vcpu info, even if
@@ -280,7 +280,7 @@ void xen_vcpu_restore(void)
 
                xen_setup_runstate_info(cpu);
 
-               if (have_vcpu_info_placement)
+               if (xen_have_vcpu_info_placement)
                        xen_vcpu_setup(cpu);
 
                if (other_cpu && is_up &&
@@ -1159,7 +1159,7 @@ void xen_setup_vcpu_info_placement(void)
         * xen_vcpu_setup managed to place the vcpu_info within the
         * percpu area for all cpus, so make use of it.
         */
-       if (have_vcpu_info_placement) {
+       if (xen_have_vcpu_info_placement) {
                pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
                pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
                pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
@@ -1178,7 +1178,7 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
 
 #define SITE(op, x)                                                    \
        case PARAVIRT_PATCH(op.x):                                      \
-       if (have_vcpu_info_placement) {                                 \
+       if (xen_have_vcpu_info_placement) {                             \
                start = (char *)xen_##x##_direct;                       \
                end = xen_##x##_direct_end;                             \
                reloc = xen_##x##_direct_reloc;                         \
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index f6a41c4..2b162f6 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -76,6 +76,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
 
 bool xen_vcpu_stolen(int vcpu);
 
+extern int xen_have_vcpu_info_placement;
+
 void xen_vcpu_setup(int cpu);
 void xen_setup_vcpu_info_placement(void);
 
-- 
2.9.3

