
[Xen-changelog] Manual merge.



ChangeSet 1.1592, 2005/05/30 11:20:47+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Manual merge.

        The process.c half of the merge converts __switch_to() from the
        queued-multicall interface to an explicit multicall_entry_t batch,
        moves I/O privilege handling from DOM0_IOPL to PHYSDEVOP_SET_IOPL
        and PHYSDEVOP_SET_IOBITMAP, and adds a play_dead() idle loop for
        CONFIG_HOTPLUG_CPU.



 process.c |  171 ++++++++++++++++++++++++++++----------------------------------
 time.c    |  146 +++++++++++++++++++++++++++++++++++++++-------------
 2 files changed, 186 insertions(+), 131 deletions(-)
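The heart of the process.c change is visible in the __switch_to() hunks below: instead of queue_multicall*() calls that a later flush picks up, the function now fills an on-stack array of multicall_entry_t and issues the batch itself. For orientation, the entry layout as in the xen-public headers of this era (reproduced from memory, so treat the exact fields as an assumption; args[] is what the hunks index into):

typedef struct multicall_entry {
        unsigned long op;      /* hypercall index, e.g. __HYPERVISOR_stack_switch */
        unsigned long args[7]; /* hypercall arguments; unused slots are ignored */
} multicall_entry_t;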


diff -Nru a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c    2005-05-30 07:03:36 -04:00
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c    2005-05-30 07:03:36 -04:00
@@ -13,6 +13,7 @@
 
 #include <stdarg.h>
 
+#include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -46,8 +47,7 @@
 #include <asm/i387.h>
 #include <asm/irq.h>
 #include <asm/desc.h>
-#include <asm-xen/multicall.h>
-#include <asm-xen/xen-public/dom0_ops.h>
+#include <asm-xen/xen-public/physdev.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
@@ -55,6 +55,9 @@
 #include <linux/irq.h>
 #include <linux/err.h>
 
+#include <asm/tlbflush.h>
+#include <asm/cpu.h>
+
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 int hlt_counter;
@@ -106,6 +109,33 @@
        }
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+#include <asm/nmi.h>
+/* We don't actually take the CPU down, just spin without interrupts. */
+static inline void play_dead(void)
+{
+       /* Ack it */
+       __get_cpu_var(cpu_state) = CPU_DEAD;
+
+       /* We shouldn't have to disable interrupts while dead, but
+        * some interrupts just don't seem to go away, and this makes
+        * it "work" for testing purposes. */
+       /* Death loop */
+       while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+               HYPERVISOR_yield();
+
+       local_irq_disable();
+       __flush_tlb_all();
+       cpu_set(smp_processor_id(), cpu_online_map);
+       local_irq_enable();
+}
+#else
+static inline void play_dead(void)
+{
+       BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  * The idle thread. There's no useful work to be
  * done, so just try to conserve power and have a
@@ -124,6 +154,9 @@
                                cpu_clear(cpu, cpu_idle_map);
                        rmb();
 
+                       if (cpu_is_offline(cpu))
+                               play_dead();
+
                        irq_stat[cpu].idle_timestamp = jiffies;
                        xen_idle();
                }
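Note that play_dead() never hands the vCPU back to Xen for real: as its comment says, the CPU just parks in a HYPERVISOR_yield() loop until cpu_state is flipped back to CPU_UP_PREPARE, at which point it re-onlines itself. A minimal sketch of the waking half of that handshake (the function name is hypothetical; the real logic lives in this tree's smpboot.c):

/* Hypothetical sketch: __cpu_up() resets the parked CPU's state word,
 * which releases the death loop in play_dead() above. No IPI is
 * needed because the parked CPU polls cpu_state on every yield. */
static void wake_parked_cpu(int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}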
@@ -221,20 +254,11 @@
 
        /* The process may have allocated an io port bitmap... nuke it. */
        if (unlikely(NULL != t->io_bitmap_ptr)) {
-               int cpu = get_cpu();
-               struct tss_struct *tss = &per_cpu(init_tss, cpu);
-
+               physdev_op_t op = { 0 };
+               op.cmd = PHYSDEVOP_SET_IOBITMAP;
+               HYPERVISOR_physdev_op(&op);
                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
-               /*
-                * Careful, clear this in the TSS too:
-                */
-               memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
-               t->io_bitmap_max = 0;
-               tss->io_bitmap_owner = NULL;
-               tss->io_bitmap_max = 0;
-               tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
-               put_cpu();
        }
 }
 
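exit_thread() no longer has to scrub the per-CPU TSS: under Xen the guest kernel does not own a hardware TSS, so a zeroed physdev_op_t (NULL bitmap pointer, zero port count) simply asks the hypervisor to drop the task's I/O bitmap. The inverse operation, installing a bitmap the way a sys_ioperm() implementation would, looks roughly like this (a sketch; the set_iobitmap field names are an assumption based on the era's xen-public/physdev.h):

/* Sketch: install an I/O bitmap via the same hypercall exit_thread()
 * uses above to clear one. NULL/0 disables the bitmap again. */
static int xen_install_io_bitmap(unsigned long *bitmap, unsigned int nr_ports)
{
        physdev_op_t op = { 0 };

        op.cmd = PHYSDEVOP_SET_IOBITMAP;
        op.u.set_iobitmap.bitmap   = (char *)bitmap;
        op.u.set_iobitmap.nr_ports = nr_ports;
        return HYPERVISOR_physdev_op(&op);
}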
@@ -283,7 +307,6 @@
        struct pt_regs * childregs;
        struct task_struct *tsk;
        int err;
-       unsigned long eflags;
 
        childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
        *childregs = *regs;
@@ -333,9 +356,7 @@
                desc->b = LDT_entry_b(&info);
        }
 
-
-       __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
-       p->thread.io_pl = (eflags >> 12) & 3;
+       p->thread.io_pl = current->thread.io_pl;
 
        err = 0;
  out:
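With the pushfl/popl sampling gone, io_pl is pure software state inherited from the parent; it only reaches the hypervisor when __switch_to() pushes it via PHYSDEVOP_SET_IOPL (see the hunk further down). A hypothetical setter in the same style, as a sys_iopl() implementation might use:

/* Hypothetical helper: record a new IOPL in the thread and apply it
 * immediately for the current task, mirroring the PHYSDEVOP_SET_IOPL
 * usage in the __switch_to() hunk below. */
static void xen_set_iopl(unsigned int new_iopl)
{
        physdev_op_t op;

        current->thread.io_pl = new_iopl;
        op.cmd = PHYSDEVOP_SET_IOPL;
        op.u.set_iopl.iopl = new_iopl;
        HYPERVISOR_physdev_op(&op);
}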
@@ -408,37 +429,6 @@
        return 1;
 }
 
-static inline void
-handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
-{
-       if (!next->io_bitmap_ptr) {
-               /*
-                * Disable the bitmap via an invalid offset. We still cache
-                * the previous bitmap owner and the IO bitmap contents:
-                */
-               tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
-               return;
-       }
-       if (likely(next == tss->io_bitmap_owner)) {
-               /*
-                * Previous owner of the bitmap (hence the bitmap content)
-                * matches the next task, we don't have to do anything but
-                * set a valid offset in the TSS:
-                */
-               tss->io_bitmap_base = IO_BITMAP_OFFSET;
-               return;
-       }
-       /*
-        * Lazy TSS's I/O bitmap copy. We set an invalid offset here
-        * and let the task get a GPF in case an I/O instruction
-        * is performed.  The handler of the GPF will verify that the
-        * faulting task has a valid I/O bitmap and, if true, does the
-        * real copy and restarts the instruction.  This will save us
-        * redundant copies when the currently switched task does not
-        * perform any I/O during its timeslice.
-        */
-       tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
-}
 /*
  * This special macro can be used to load a debugging register
  */
@@ -479,29 +469,10 @@
                                 *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
-       dom0_op_t op;
-
-        /* NB. No need to disable interrupts as already done in sched.c */
-        /* __cli(); */
-
-       /*
-        * Save away %fs and %gs. No need to save %es and %ds, as
-        * those are always kernel segments while inside the kernel.
-        */
-       asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-       asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
-
-       /*
-        * We clobber FS and GS here so that we avoid a GPF when restoring
-        * previous task's FS/GS values in Xen when the LDT is switched.
-        */
-       __asm__ __volatile__ ( 
-               "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
-               "eax" );
-
-       MULTICALL_flush_page_update_queue();
+       physdev_op_t iopl_op, iobmp_op;
+       multicall_entry_t _mcl[8], *mcl = _mcl;
 
-       /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+       /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
 
        /*
         * This is basically '__unlazy_fpu', except that we queue a
@@ -510,7 +481,9 @@
         */
        if (prev_p->thread_info->status & TS_USEDFPU) {
                __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-               queue_multicall0(__HYPERVISOR_fpu_taskswitch);
+               mcl->op      = __HYPERVISOR_fpu_taskswitch;
+               mcl->args[0] = 1;
+               mcl++;
        }
 
        /*
@@ -518,35 +491,50 @@
         * This is load_esp0(tss, next) with a multicall.
         */
        tss->esp0 = next->esp0;
-       queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
+       mcl->op      = __HYPERVISOR_stack_switch;
+       mcl->args[0] = tss->ss0;
+       mcl->args[1] = tss->esp0;
+       mcl++;
 
        /*
         * Load the per-thread Thread-Local Storage descriptor.
         * This is load_TLS(next, cpu) with multicalls.
         */
-#define C(i) do {                                                          \
-       if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||        \
-                    next->tls_array[i].b != prev->tls_array[i].b))         \
-               queue_multicall3(__HYPERVISOR_update_descriptor,            \
-                                virt_to_machine(&get_cpu_gdt_table(cpu)    \
-                                                [GDT_ENTRY_TLS_MIN + i]),  \
-                                ((u32 *)&next->tls_array[i])[0],           \
-                                ((u32 *)&next->tls_array[i])[1]);          \
+#define C(i) do {                                                       \
+       if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||    \
+                    next->tls_array[i].b != prev->tls_array[i].b)) {   \
+               mcl->op      = __HYPERVISOR_update_descriptor;          \
+               mcl->args[0] = virt_to_machine(&get_cpu_gdt_table(cpu)  \
+                                        [GDT_ENTRY_TLS_MIN + i]);      \
+               mcl->args[1] = ((u32 *)&next->tls_array[i])[0];         \
+               mcl->args[2] = ((u32 *)&next->tls_array[i])[1];         \
+               mcl++;                                                  \
+       }                                                               \
 } while (0)
        C(0); C(1); C(2);
 #undef C
 
-       if (xen_start_info.flags & SIF_PRIVILEGED) {
-               op.cmd           = DOM0_IOPL;
-               op.u.iopl.domain = DOMID_SELF;
-               op.u.iopl.iopl   = next->io_pl;
-               op.interface_version = DOM0_INTERFACE_VERSION;
-               queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
+       if (unlikely(prev->io_pl != next->io_pl)) {
+               iopl_op.cmd             = PHYSDEVOP_SET_IOPL;
+               iopl_op.u.set_iopl.iopl = next->io_pl;
+               mcl->op      = __HYPERVISOR_physdev_op;
+               mcl->args[0] = (unsigned long)&iopl_op;
+               mcl++;
+       }
+
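The diff is truncated here: the tail of __switch_to() and the entire time.c portion did not survive in the archived message. The declarations at the top of the function (iobmp_op, the _mcl array) make the intended pattern clear, though: queue an I/O bitmap update the same way as iopl_op, then issue every queued entry in a single trap into Xen. A sketch of that tail, assuming the standard HYPERVISOR_multicall(entries, nr) interface; an illustration of the pattern, not a reconstruction of the missing hunk:

        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                iobmp_op.cmd = PHYSDEVOP_SET_IOBITMAP;
                iobmp_op.u.set_iobitmap.bitmap   = (char *)next->io_bitmap_ptr;
                iobmp_op.u.set_iobitmap.nr_ports =
                        next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
                mcl->op      = __HYPERVISOR_physdev_op;
                mcl->args[0] = (unsigned long)&iobmp_op;
                mcl++;
        }

        /* Execute all queued entries with one hypercall. */
        (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);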
