
[Xen-devel] [PATCH] linux: Introduce {current_,}vcpu_info



And use them where possible. Also make time-xen.c consistent by giving
get_time_values_from_xen() a 'cpu' parameter to match the other functions
there (this consolidation could of course also be done the other way
around, but I think this way the resulting code can be more efficient).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
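
For reference, the two helpers added in the hypervisor.h hunk below are plain
macros over the shared-info page, and call sites shrink accordingly. The
before/after lines here are only an illustration of the pattern (taken from
the fault-xen.c hunk), not an extra part of the patch:

    #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
    #define current_vcpu_info() vcpu_info(smp_processor_id())

    /* before */
    address = HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2;
    /* after */
    address = current_vcpu_info()->arch.cr2;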

Index: head-2007-02-08/arch/i386/kernel/time-xen.c
===================================================================
--- head-2007-02-08.orig/arch/i386/kernel/time-xen.c    2007-02-15 16:36:56.000000000 +0100
+++ head-2007-02-08/arch/i386/kernel/time-xen.c 2007-02-15 16:37:52.000000000 +0100
@@ -222,8 +222,7 @@ int read_current_timer(unsigned long *ti
 void init_cpu_khz(void)
 {
        u64 __cpu_khz = 1000000ULL << 32;
-       struct vcpu_time_info *info;
-       info = &HYPERVISOR_shared_info->vcpu_info[0].time;
+       struct vcpu_time_info *info = &vcpu_info(0)->time;
        do_div(__cpu_khz, info->tsc_to_system_mul);
        if (info->tsc_shift < 0)
                cpu_khz = __cpu_khz << -info->tsc_shift;
@@ -293,14 +292,13 @@ static void update_wallclock(void)
  * Reads a consistent set of time-base values from Xen, into a shadow data
  * area.
  */
-static void get_time_values_from_xen(void)
+static void get_time_values_from_xen(int cpu)
 {
-       shared_info_t           *s = HYPERVISOR_shared_info;
        struct vcpu_time_info   *src;
        struct shadow_time_info *dst;
 
-       src = &s->vcpu_info[smp_processor_id()].time;
-       dst = &per_cpu(shadow_time, smp_processor_id());
+       src = &vcpu_info(cpu)->time;
+       dst = &per_cpu(shadow_time, cpu);
 
        do {
                dst->version = src->version;
@@ -320,7 +318,7 @@ static inline int time_values_up_to_date
        struct vcpu_time_info   *src;
        struct shadow_time_info *dst;
 
-       src = &HYPERVISOR_shared_info->vcpu_info[cpu].time;
+       src = &vcpu_info(cpu)->time;
        dst = &per_cpu(shadow_time, cpu);
 
        rmb();
@@ -412,7 +410,7 @@ void do_gettimeofday(struct timeval *tv)
                         * overflowed). Detect that and recalculate
                         * with fresh values.
                         */
-                       get_time_values_from_xen();
+                       get_time_values_from_xen(cpu);
                        continue;
                }
        } while (read_seqretry(&xtime_lock, seq) ||
@@ -456,7 +454,7 @@ int do_settimeofday(struct timespec *tv)
                nsec = tv->tv_nsec - get_nsec_offset(shadow);
                if (time_values_up_to_date(cpu))
                        break;
-               get_time_values_from_xen();
+               get_time_values_from_xen(cpu);
        }
        sec = tv->tv_sec;
        __normalize_time(&sec, &nsec);
@@ -551,7 +549,7 @@ unsigned long long sched_clock(void)
                barrier();
                time = shadow->system_timestamp + get_nsec_offset(shadow);
                if (!time_values_up_to_date(cpu))
-                       get_time_values_from_xen();
+                       get_time_values_from_xen(cpu);
                barrier();
        } while (local_time_version != shadow->version);
 
@@ -621,7 +619,7 @@ irqreturn_t timer_interrupt(int irq, voi
        write_seqlock(&xtime_lock);
 
        do {
-               get_time_values_from_xen();
+               get_time_values_from_xen(cpu);
 
                /* Obtain a consistent snapshot of elapsed wallclock cycles. */
                delta = delta_cpu =
@@ -921,7 +919,7 @@ void __init time_init(void)
                return;
        }
 #endif
-       get_time_values_from_xen();
+       get_time_values_from_xen(0);
 
        processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
        per_cpu(processed_system_time, 0) = processed_system_time;
@@ -1029,7 +1027,7 @@ void time_resume(void)
 {
        init_cpu_khz();
 
-       get_time_values_from_xen();
+       get_time_values_from_xen(0);
 
        processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
        per_cpu(processed_system_time, 0) = processed_system_time;
Index: head-2007-02-08/arch/x86_64/mm/fault-xen.c
===================================================================
--- head-2007-02-08.orig/arch/x86_64/mm/fault-xen.c     2007-02-15 16:36:50.000000000 +0100
+++ head-2007-02-08/arch/x86_64/mm/fault-xen.c  2007-02-15 16:00:04.000000000 +0100
@@ -411,8 +411,7 @@ asmlinkage void __kprobes do_page_fault(
        prefetchw(&mm->mmap_sem);
 
        /* get the address */
-       address = HYPERVISOR_shared_info->vcpu_info[
-               smp_processor_id()].arch.cr2;
+       address = current_vcpu_info()->arch.cr2;
 
        info.si_code = SEGV_MAPERR;
 
Index: head-2007-02-08/include/asm-i386/mach-xen/asm/hypervisor.h
===================================================================
--- head-2007-02-08.orig/include/asm-i386/mach-xen/asm/hypervisor.h     2007-02-15 16:36:47.000000000 +0100
+++ head-2007-02-08/include/asm-i386/mach-xen/asm/hypervisor.h  2007-02-15 16:05:22.000000000 +0100
@@ -46,16 +46,12 @@
 #include <xen/interface/nmi.h>
 #include <asm/ptrace.h>
 #include <asm/page.h>
-#if defined(__i386__)
-#  ifdef CONFIG_X86_PAE
-#   include <asm-generic/pgtable-nopud.h>
-#  else
-#   include <asm-generic/pgtable-nopmd.h>
-#  endif
-#endif
 
 extern shared_info_t *HYPERVISOR_shared_info;
 
+#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
+#define current_vcpu_info() vcpu_info(smp_processor_id())
+
 #ifdef CONFIG_X86_32
 extern unsigned long hypervisor_virt_start;
 #endif
Index: head-2007-02-08/include/asm-i386/mach-xen/asm/irqflags.h
===================================================================
--- head-2007-02-08.orig/include/asm-i386/mach-xen/asm/irqflags.h       2007-02-15 16:36:47.000000000 +0100
+++ head-2007-02-08/include/asm-i386/mach-xen/asm/irqflags.h    2007-02-15 15:55:01.000000000 +0100
@@ -12,12 +12,6 @@
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_SMP
-#define __vcpu_id smp_processor_id()
-#else
-#define __vcpu_id 0
-#endif
-
 /* 
  * The use of 'barrier' in the following reflects their use as local-lock
  * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
@@ -26,8 +20,7 @@
  * includes these barriers, for example.
  */
 
-#define __raw_local_save_flags()                                       \
-       (&HYPERVISOR_shared_info->vcpu_info[__vcpu_id])->evtchn_upcall_mask;
+#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
 
 #define raw_local_save_flags(flags) \
                do { (flags) = __raw_local_save_flags(); } while (0)
@@ -36,7 +29,7 @@
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
+       _vcpu = current_vcpu_info();                                    \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if (unlikely(_vcpu->evtchn_upcall_pending))             \
@@ -46,9 +39,7 @@ do {                                                           \
 
 #define raw_local_irq_disable()                                                \
 do {                                                                   \
-       vcpu_info_t *_vcpu;                                             \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
-       _vcpu->evtchn_upcall_mask = 1;                                  \
+       current_vcpu_info()->evtchn_upcall_mask = 1;                    \
        barrier();                                                      \
 } while (0)
 
@@ -56,7 +47,7 @@ do {                                                           \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
+       _vcpu = current_vcpu_info();            \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if (unlikely(_vcpu->evtchn_upcall_pending))                     \
Index: head-2007-02-08/include/asm-i386/mach-xen/asm/system.h
===================================================================
--- head-2007-02-08.orig/include/asm-i386/mach-xen/asm/system.h 2007-02-15 16:36:47.000000000 +0100
+++ head-2007-02-08/include/asm-i386/mach-xen/asm/system.h      2007-02-15 15:59:01.000000000 +0100
@@ -100,8 +100,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t"
 #define write_cr0(x) \
        __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
 
-#define read_cr2() \
-       (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
+#define read_cr2() (current_vcpu_info()->arch.cr2)
 #define write_cr2(x) \
        __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
 
Index: head-2007-02-08/include/asm-x86_64/mach-xen/asm/irqflags.h
===================================================================
--- head-2007-02-08.orig/include/asm-x86_64/mach-xen/asm/irqflags.h     2007-02-15 16:36:47.000000000 +0100
+++ head-2007-02-08/include/asm-x86_64/mach-xen/asm/irqflags.h  2007-02-15 15:55:32.000000000 +0100
@@ -15,12 +15,6 @@
  * Interrupt control:
  */
 
-#ifdef CONFIG_SMP
-#define __vcpu_id smp_processor_id()
-#else
-#define __vcpu_id 0
-#endif
-
 /*
  * The use of 'barrier' in the following reflects their use as local-lock
  * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
@@ -29,8 +23,7 @@
  * includes these barriers, for example.
  */
 
-#define __raw_local_save_flags()                                       \
-       (&HYPERVISOR_shared_info->vcpu_info[__vcpu_id])->evtchn_upcall_mask;
+#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
 
 #define raw_local_save_flags(flags) \
                do { (flags) = __raw_local_save_flags(); } while (0)
@@ -39,7 +32,7 @@
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
+       _vcpu = current_vcpu_info();            \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
@@ -76,9 +69,7 @@ static inline int raw_irqs_disabled_flag
 
 #define raw_local_irq_disable()                                                \
 do {                                                                   \
-       vcpu_info_t *_vcpu;                                             \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
-       _vcpu->evtchn_upcall_mask = 1;                                  \
+       current_vcpu_info()->evtchn_upcall_mask = 1;                            \
        barrier();                                                      \
 } while (0)
 
@@ -86,7 +77,7 @@ do {                                                           \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];          \
+       _vcpu = current_vcpu_info();            \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
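
On the time-xen.c consolidation: callers of get_time_values_from_xen() already
hold the CPU number, so passing it in avoids re-deriving smp_processor_id()
inside the function. A minimal sketch of the resulting caller pattern
(illustrative only, error and locking details omitted):

    int cpu = get_cpu();
    struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);

    if (!time_values_up_to_date(cpu))
        get_time_values_from_xen(cpu);  /* refresh this CPU's shadow copy */
    /* ... read shadow->system_timestamp, shadow->version, etc. ... */
    put_cpu();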


