
[Xen-changelog] merge?



# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID 87dec3b9c54609eb913b9776020bf040ecec476c
# Parent  6c8c3df37bfe5f8d1fa29409e454172e1bc29f21
# Parent  3c1cd2486b7fecd5d97378a7d52e5bfb8eb7c718
merge?

diff -r 6c8c3df37bfe -r 87dec3b9c546 linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c       Fri Aug 19 15:21:12 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c       Fri Aug 19 15:22:05 2005
@@ -115,20 +115,12 @@
 /* We don't actually take CPU down, just spin without interrupts. */
 static inline void play_dead(void)
 {
-       /* Ack it */
-       __get_cpu_var(cpu_state) = CPU_DEAD;
-
-       /* We shouldn't have to disable interrupts while dead, but
-        * some interrupts just don't seem to go away, and this makes
-        * it "work" for testing purposes. */
        /* Death loop */
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                HYPERVISOR_yield();
 
-       local_irq_disable();
        __flush_tlb_all();
        cpu_set(smp_processor_id(), cpu_online_map);
-       local_irq_enable();
 }
 #else
 static inline void play_dead(void)
@@ -156,12 +148,19 @@
                        rmb();
 
                        if (cpu_is_offline(cpu)) {
+                               local_irq_disable();
+                               /* Ack it.  From this point on until
+                                  we get woken up, we're not allowed
+                                  to take any locks.  In particular,
+                                  don't printk. */
+                               __get_cpu_var(cpu_state) = CPU_DEAD;
 #if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
                                /* Tell hypervisor to take vcpu down. */
                                HYPERVISOR_vcpu_down(cpu);
 #endif
                                play_dead();
-         }
+                               local_irq_enable();
+                       }
 
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        xen_idle();
@@ -791,3 +790,10 @@
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
 }
+
+
+#ifndef CONFIG_X86_SMP
+void _restore_vcpu(void)
+{
+}
+#endif
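
The hunks above move the CPU_DEAD acknowledgement out of play_dead() and into the idle loop, with interrupts disabled for the whole dead period, which is why the new comment forbids taking locks or calling printk. A minimal sketch of that lock-free handshake, with hypothetical names standing in for the kernel's per-cpu cpu_state and HYPERVISOR_yield():

    /* Sketch only: a dying cpu acknowledges death, then spins taking
       no locks and doing no I/O until the bringup path flips the
       state back. */
    #include <stdatomic.h>

    enum cpu_state { CPU_RUNNING, CPU_DEAD_STATE, CPU_UP_PREPARE_STATE };

    static _Atomic enum cpu_state state = CPU_RUNNING;

    static void yield_to_hypervisor(void) { /* stand-in for HYPERVISOR_yield() */ }

    static void dead_loop(void)
    {
        atomic_store(&state, CPU_DEAD_STATE);               /* "Ack it" */
        while (atomic_load(&state) != CPU_UP_PREPARE_STATE)
            yield_to_hypervisor();                          /* burn no host cpu */
    }
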
diff -r 6c8c3df37bfe -r 87dec3b9c546 linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c       Fri Aug 19 15:21:12 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c       Fri Aug 19 15:22:05 2005
@@ -1616,3 +1616,21 @@
        smp_intr_init();
        local_setup_timer_irq();
 }
+
+DECLARE_PER_CPU(int, timer_irq);
+
+void _restore_vcpu(void)
+{
+       int cpu = smp_processor_id();
+       extern atomic_t vcpus_rebooting;
+
+       /* We are the first thing the vcpu runs when it comes back,
+          and we are supposed to restore the IPIs and timer
+          interrupts etc.  When we return, the vcpu's idle loop will
+          start up again. */
+       _bind_virq_to_irq(VIRQ_TIMER, cpu, per_cpu(timer_irq, cpu));
+       _bind_virq_to_irq(VIRQ_DEBUG, cpu, per_cpu(ldebug_irq, cpu));
+       _bind_ipi_to_irq(RESCHEDULE_VECTOR, cpu, per_cpu(resched_irq, cpu) );
+       _bind_ipi_to_irq(CALL_FUNCTION_VECTOR, cpu, per_cpu(callfunc_irq, cpu));
+       atomic_dec(&vcpus_rebooting);
+}
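
_restore_vcpu() pairs with restore_vcpu_context() in reboot.c below through the vcpus_rebooting counter: the suspending cpu sets it to 1, boots the vcpu, and spins until the resumed vcpu decrements it after rewiring its interrupts. A sketch of that handshake, assuming C11 atomics in place of the kernel's atomic_t and a hypothetical boot_vcpu() standing in for HYPERVISOR_boot_vcpu():

    #include <stdatomic.h>

    static atomic_int vcpus_rebooting;

    static void boot_vcpu(int cpu) { (void)cpu; /* hypothetical: starts the vcpu */ }

    /* Booting side: announce one vcpu in flight, start it, then wait. */
    static void restore_one(int cpu)
    {
        atomic_store(&vcpus_rebooting, 1);
        boot_vcpu(cpu);
        while (atomic_load(&vcpus_rebooting) != 0)
            ;   /* barrier()-style spin: nothing else is safe yet */
    }

    /* Resumed side (end of _restore_vcpu): interrupts are rewired,
       so release the booter before falling back into the idle loop. */
    static void restore_done(void)
    {
        atomic_fetch_sub(&vcpus_rebooting, 1);
    }
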
diff -r 6c8c3df37bfe -r 87dec3b9c546 linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c  Fri Aug 19 15:21:12 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c  Fri Aug 19 15:22:05 2005
@@ -745,7 +745,7 @@
 #endif
 
 /* Dynamically-mapped IRQ. */
-static DEFINE_PER_CPU(int, timer_irq);
+DEFINE_PER_CPU(int, timer_irq);
 
 static struct irqaction irq_timer = {
        timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer0",
diff -r 6c8c3df37bfe -r 87dec3b9c546 linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Fri Aug 19 15:21:12 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Fri Aug 19 15:22:05 2005
@@ -144,7 +144,7 @@
     vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
 
     vcpu_info->evtchn_upcall_pending = 0;
-    
+
     /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
     l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
     while ( l1 != 0 )
@@ -158,9 +158,9 @@
             l2 &= ~(1 << l2i);
             
             port = (l1i << 5) + l2i;
-            if ( (irq = evtchn_to_irq[port]) != -1 )
+            if ( (irq = evtchn_to_irq[port]) != -1 ) {
                 do_IRQ(irq, regs);
-            else
+            } else
                 evtchn_device_upcall(port);
         }
     }
@@ -243,6 +243,74 @@
     }
 
     spin_unlock(&irq_mapping_update_lock);
+}
+
+/* This is only used when restoring a vcpu from an xm save.  The ipi
+   is expected to have been bound before we suspended, and so all of
+   the xenolinux state is set up; we only need to restore the Xen
+   side of things.  The irq number has to be the same, but the evtchn
+   number can change. */
+void _bind_ipi_to_irq(int ipi, int vcpu, int irq)
+{
+    evtchn_op_t op;
+    int evtchn;
+
+    spin_lock(&irq_mapping_update_lock);
+
+    op.cmd = EVTCHNOP_bind_ipi;
+    if ( HYPERVISOR_event_channel_op(&op) != 0 )
+       panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, vcpu);
+    evtchn = op.u.bind_ipi.port;
+
+    printk("<0>IPI %d, old evtchn %d, evtchn %d.\n",
+          ipi, per_cpu(ipi_to_evtchn, vcpu)[ipi],
+          evtchn);
+
+    evtchn_to_irq[irq_to_evtchn[irq]] = -1;
+    irq_to_evtchn[irq] = -1;
+
+    evtchn_to_irq[evtchn] = irq;
+    irq_to_evtchn[irq]    = evtchn;
+
+    printk("<0>evtchn_to_irq[%d] = %d.\n", evtchn,
+          evtchn_to_irq[evtchn]);
+    per_cpu(ipi_to_evtchn, vcpu)[ipi] = evtchn;
+
+    bind_evtchn_to_cpu(evtchn, vcpu);
+
+    spin_unlock(&irq_mapping_update_lock);
+
+    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_mask);
+    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_pending);
+}
+
+void _bind_virq_to_irq(int virq, int cpu, int irq)
+{
+    evtchn_op_t op;
+    int evtchn;
+
+    spin_lock(&irq_mapping_update_lock);
+
+    op.cmd              = EVTCHNOP_bind_virq;
+    op.u.bind_virq.virq = virq;
+    if ( HYPERVISOR_event_channel_op(&op) != 0 )
+            panic("Failed to bind virtual IRQ %d\n", virq);
+    evtchn = op.u.bind_virq.port;
+
+    evtchn_to_irq[irq_to_evtchn[irq]] = -1;
+    irq_to_evtchn[irq] = -1;
+
+    evtchn_to_irq[evtchn] = irq;
+    irq_to_evtchn[irq]    = evtchn;
+
+    per_cpu(virq_to_irq, cpu)[virq] = irq;
+
+    bind_evtchn_to_cpu(evtchn, cpu);
+
+    spin_unlock(&irq_mapping_update_lock);
+
+    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_mask);
+    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_pending);
 }
 
 int bind_ipi_to_irq(int ipi)
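
Both _bind_* helpers above keep the Linux irq number stable (drivers and the per-cpu tables still hold it) and swap only the Xen event channel underneath, fixing up the two lookup arrays under irq_mapping_update_lock. A stripped-down sketch of that table swap, with hypothetical table sizes and no locking:

    #define NR_IRQS    256
    #define NR_EVTCHNS 1024

    static int evtchn_to_irq[NR_EVTCHNS];   /* assumed initialised to -1 */
    static int irq_to_evtchn[NR_IRQS];      /* assumed initialised to -1 */

    /* The irq survives save/restore; only the event channel backing it
       changes.  Unhook the stale channel, then hook the new one. */
    static void remap_irq(int irq, int new_evtchn)
    {
        int old = irq_to_evtchn[irq];
        if (old >= 0)
            evtchn_to_irq[old] = -1;        /* retire pre-suspend channel */
        evtchn_to_irq[new_evtchn] = irq;
        irq_to_evtchn[irq] = new_evtchn;
    }
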
diff -r 6c8c3df37bfe -r 87dec3b9c546 linux-2.6-xen-sparse/arch/xen/kernel/reboot.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c     Fri Aug 19 15:21:12 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c     Fri Aug 19 15:22:05 2005
@@ -16,6 +16,8 @@
 #include <asm-xen/queues.h>
 #include <asm-xen/xenbus.h>
 #include <asm-xen/ctrl_if.h>
+#include <linux/cpu.h>
+#include <linux/kthread.h>
 
 #define SHUTDOWN_INVALID  -1
 #define SHUTDOWN_POWEROFF  0
@@ -58,10 +60,71 @@
 /* Ignore multiple shutdown requests. */
 static int shutting_down = SHUTDOWN_INVALID;
 
-static void __do_suspend(void)
+#ifndef CONFIG_HOTPLUG_CPU
+#define cpu_down(x) (-EOPNOTSUPP)
+#define cpu_up(x) (-EOPNOTSUPP)
+#endif
+
+static void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
+{
+    int r;
+    int gdt_pages;
+    r = HYPERVISOR_vcpu_pickle(vcpu, ctxt);
+    if (r != 0)
+       panic("pickling vcpu %d -> %d!\n", vcpu, r);
+
+    /* Translate from machine to physical addresses where necessary,
+       so that they can be translated to our new machine address space
+       after resume.  libxc is responsible for doing this to vcpu0,
+       but we do it to the others. */
+    gdt_pages = (ctxt->gdt_ents + 511) / 512;
+    ctxt->ctrlreg[3] = machine_to_phys(ctxt->ctrlreg[3]);
+    for (r = 0; r < gdt_pages; r++)
+       ctxt->gdt_frames[r] = mfn_to_pfn(ctxt->gdt_frames[r]);
+}
+
+void _restore_vcpu(int cpu);
+
+atomic_t vcpus_rebooting;
+
+static int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
+{
+    int r;
+    int gdt_pages = (ctxt->gdt_ents + 511) / 512;
+
+    /* This is kind of a hack, and implicitly relies on the fact that
+       the vcpu stops in a place where all of the call clobbered
+       registers are already dead. */
+    ctxt->user_regs.esp -= 4;
+    ((unsigned long *)ctxt->user_regs.esp)[0] = ctxt->user_regs.eip;
+    ctxt->user_regs.eip = (unsigned long)_restore_vcpu;
+
+    /* De-canonicalise.  libxc handles this for vcpu 0, but we need
+       to do it for the other vcpus. */
+    ctxt->ctrlreg[3] = phys_to_machine(ctxt->ctrlreg[3]);
+    for (r = 0; r < gdt_pages; r++)
+       ctxt->gdt_frames[r] = pfn_to_mfn(ctxt->gdt_frames[r]);
+
+    atomic_set(&vcpus_rebooting, 1);
+    r = HYPERVISOR_boot_vcpu(vcpu, ctxt);
+    if (r != 0) {
+       printk(KERN_EMERG "Failed to reboot vcpu %d (%d)\n", vcpu, r);
+       return -1;
+    }
+
+    /* Make sure we wait for the new vcpu to come up before trying to do
+       anything with it or starting the next one. */
+    while (atomic_read(&vcpus_rebooting))
+       barrier();
+
+    return 0;
+}
+
+static int __do_suspend(void *ignore)
 {
     int i, j;
     suspend_record_t *suspend_record;
+    static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
 
     /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
        /* XXX SMH: yes it would :-( */ 
@@ -97,13 +160,63 @@
     extern unsigned long max_pfn;
     extern unsigned int *pfn_to_mfn_frame_list;
 
+    cpumask_t prev_online_cpus, prev_present_cpus;
+    int err = 0;
+
+    BUG_ON(smp_processor_id() != 0);
+    BUG_ON(in_interrupt());
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
+    if (num_online_cpus() > 1) {
+       printk(KERN_WARNING "Can't suspend SMP guests without CONFIG_HOTPLUG_CPU\n");
+       return -EOPNOTSUPP;
+    }
+#endif
+
     suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
     if ( suspend_record == NULL )
         goto out;
 
+    /* Take all of the other cpus offline.  We need to be careful not
+       to get preempted between the final test for num_online_cpus()
+       == 1 and disabling interrupts, since otherwise userspace could
+       bring another cpu online, and then we'd be stuffed.  At the
+       same time, cpu_down can reschedule, so we need to enable
+       preemption while doing that.  This kind of sucks, but should be
+       correct. */
+    /* (We don't need to worry about other cpus bringing stuff up,
+       since by the time num_online_cpus() == 1, there aren't any
+       other cpus) */
+    cpus_clear(prev_online_cpus);
+    preempt_disable();
+    while (num_online_cpus() > 1) {
+       preempt_enable();
+       for_each_online_cpu(i) {
+           if (i == 0)
+               continue;
+           err = cpu_down(i);
+           if (err != 0) {
+               printk(KERN_CRIT "Failed to take all CPUs down: %d.\n", err);
+               goto out_reenable_cpus;
+           }
+           cpu_set(i, prev_online_cpus);
+       }
+       preempt_disable();
+    }
+
     suspend_record->nr_pfns = max_pfn; /* final number of pfns */
 
     __cli();
+
+    preempt_enable();
+
+    cpus_clear(prev_present_cpus);
+    for_each_present_cpu(i) {
+       if (i == 0)
+           continue;
+       save_vcpu_context(i, &suspended_cpu_records[i]);
+       cpu_set(i, prev_present_cpus);
+    }
 
 #ifdef __i386__
     mm_pin_all();
@@ -132,6 +245,8 @@
     memcpy(&suspend_record->resume_info, &xen_start_info,
            sizeof(xen_start_info));
 
+    /* We'll stop somewhere inside this hypercall.  When it returns,
+       we'll start resuming after the restore. */
     HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
 
     shutting_down = SHUTDOWN_INVALID; 
@@ -171,11 +286,26 @@
 
     usbif_resume();
 
+    for_each_cpu_mask(i, prev_present_cpus) {
+       restore_vcpu_context(i, &suspended_cpu_records[i]);
+    }
+
     __sti();
+
+ out_reenable_cpus:
+    for_each_cpu_mask(i, prev_online_cpus) {
+       j = cpu_up(i);
+       if (j != 0) {
+           printk(KERN_CRIT "Failed to bring cpu %d back up (%d).\n",
+                  i, j);
+           err = j;
+       }
+    }
 
  out:
     if ( suspend_record != NULL )
         free_page((unsigned long)suspend_record);
+    return err;
 }
 
 static int shutdown_process(void *__unused)
@@ -222,6 +352,18 @@
     return 0;
 }
 
+static struct task_struct *kthread_create_on_cpu(int (*f)(void *arg),
+                                                void *arg,
+                                                const char *name,
+                                                int cpu)
+{
+    struct task_struct *p;
+    p = kthread_create(f, arg, name);
+    kthread_bind(p, cpu);
+    wake_up_process(p);
+    return p;
+}
+
 static void __shutdown_handler(void *unused)
 {
     int err;
@@ -234,7 +376,7 @@
     }
     else
     {
-        __do_suspend();
+       kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
     }
 }
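
restore_vcpu_context() above relies on a "fake call" trick: it pushes the pickled eip onto the saved stack as a return address and points eip at _restore_vcpu, so the ret at the end of _restore_vcpu resumes the vcpu exactly where it was pickled. This only works because, as the comment notes, the pickle point has no live call-clobbered registers. A userspace sketch of the same manipulation (field names loosely mirror vcpu_guest_context_t; fixup_routine is hypothetical):

    struct regs { unsigned long esp, eip; };

    extern void fixup_routine(void);   /* plays the role of _restore_vcpu */

    static void inject_fixup(struct regs *r)
    {
        r->esp -= sizeof(unsigned long);
        *(unsigned long *)r->esp = r->eip;        /* simulated "push eip" */
        r->eip = (unsigned long)fixup_routine;    /* runs first on resume;
                                                     its ret pops the old eip */
    }
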
 
diff -r 6c8c3df37bfe -r 87dec3b9c546 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/process.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/process.c     Fri Aug 19 15:21:12 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/process.c     Fri Aug 19 15:22:05 2005
@@ -743,3 +743,9 @@
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
 }
+
+#ifndef CONFIG_SMP
+void _restore_vcpu(void)
+{
+}
+#endif
diff -r 6c8c3df37bfe -r 87dec3b9c546 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c     Fri Aug 19 15:21:12 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c     Fri Aug 19 15:22:05 2005
@@ -1286,4 +1286,10 @@
        smp_intr_init();
        local_setup_timer_irq();
 }
-#endif
+
+void _restore_vcpu(void)
+{
+       /* XXX need to write this */
+}
+
+#endif
diff -r 6c8c3df37bfe -r 87dec3b9c546 linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h Fri Aug 19 15:21:12 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h Fri Aug 19 15:22:05 2005
@@ -163,7 +163,7 @@
         TRAP_INSTR
         : "=a" (ret), "=b" (ign)
        : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
-       : "memory" );
+       : "memory", "ecx" );
 
     return ret;
 }
@@ -178,7 +178,7 @@
         TRAP_INSTR
         : "=a" (ret), "=b" (ign1)
        : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
-       : "memory" );
+       : "memory", "ecx" );
 
     return ret;
 }
@@ -194,7 +194,7 @@
         : "=a" (ret), "=b" (ign1)
        : "0" (__HYPERVISOR_sched_op),
          "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
-        : "memory" );
+        : "memory", "ecx" );
 
     return ret;
 }
@@ -210,7 +210,7 @@
         : "=a" (ret), "=b" (ign1)
        : "0" (__HYPERVISOR_sched_op),
          "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
-        : "memory" );
+        : "memory", "ecx" );
 
     return ret;
 }
@@ -228,7 +228,7 @@
         : "=a" (ret), "=b" (ign1), "=S" (ign2)
        : "0" (__HYPERVISOR_sched_op),
         "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)), 
-        "S" (srec) : "memory");
+        "S" (srec) : "memory", "ecx");
 
     return ret;
 }
@@ -244,7 +244,7 @@
         : "=a" (ret), "=b" (ign1)
        : "0" (__HYPERVISOR_sched_op),
          "1" (SCHEDOP_shutdown | (SHUTDOWN_crash << SCHEDOP_reasonshift))
-        : "memory" );
+        : "memory", "ecx" );
 
     return ret;
 }
@@ -529,12 +529,15 @@
 {
     int ret;
     unsigned long ign1;
+    /* Yes, I really do want to clobber edx here: when we resume a
+       vcpu after unpickling a multi-processor domain, it returns
+       here, but clobbers all of the call clobbered registers. */
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret), "=b" (ign1)
        : "0" (__HYPERVISOR_sched_op),
          "1" (SCHEDOP_vcpu_down | (vcpu << SCHEDOP_vcpushift))
-        : "memory" );
+        : "memory", "ecx", "edx" );
 
     return ret;
 }
@@ -550,8 +553,26 @@
         : "=a" (ret), "=b" (ign1)
        : "0" (__HYPERVISOR_sched_op),
          "1" (SCHEDOP_vcpu_up | (vcpu << SCHEDOP_vcpushift))
+        : "memory", "ecx" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_vcpu_pickle(
+    int vcpu, vcpu_guest_context_t *ctxt)
+{
+    int ret;
+    unsigned long ign1, ign2;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
+       : "0" (__HYPERVISOR_sched_op),
+         "1" (SCHEDOP_vcpu_pickle | (vcpu << SCHEDOP_vcpushift)),
+         "2" (ctxt)
         : "memory" );
 
     return ret;
 }
+
 #endif /* __HYPERCALL_H__ */
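
The recurring change in this file adds "ecx" (and, for vcpu_down, "edx") to the clobber lists, telling GCC those registers do not survive TRAP_INSTR; without that, the compiler may cache a live value in ecx across the hypercall and read garbage afterwards. A minimal illustration of the idea, using a harmless mov in place of TRAP_INSTR:

    /* Sketch: the "ecx" entry promises GCC that this asm may trash
       %ecx, so no live value is kept there across it. */
    static inline int fake_hypercall(int op)
    {
        int ret;
        __asm__ __volatile__ (
            "movl %1, %0"
            : "=a" (ret)
            : "b" (op)
            : "memory", "ecx");
        return ret;
    }
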
diff -r 6c8c3df37bfe -r 87dec3b9c546 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Aug 19 15:21:12 2005
+++ b/xen/arch/x86/domain.c     Fri Aug 19 15:22:05 2005
@@ -217,8 +217,16 @@
     return xmalloc(struct vcpu);
 }
 
+/* We assume that vcpu 0 is always the last one to be freed in a
+   domain, i.e. if v->vcpu_id == 0, the domain should be
+   single-processor. */
 void arch_free_vcpu_struct(struct vcpu *v)
 {
+    struct vcpu *p;
+    for_each_vcpu(v->domain, p) {
+        if (p->next_in_list == v)
+            p->next_in_list = v->next_in_list;
+    }
     xfree(v);
 }
 
@@ -403,7 +411,7 @@
     {
         if ( ((c->user_regs.cs & 3) == 0) ||
              ((c->user_regs.ss & 3) == 0) )
-                return -EINVAL;
+            return -EINVAL;
     }
 
     clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
@@ -457,7 +465,7 @@
         if ( !(c->flags & VGCF_VMX_GUEST) )
 #endif
             if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d, 
-                  PGT_base_page_table) )
+                                    PGT_base_page_table) )
                 return -EINVAL;
     }
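
arch_free_vcpu_struct() now unlinks the vcpu from the domain's singly linked next_in_list chain before freeing it; combined with the reverse-order loop in schedule.c further down, vcpu 0 (the list head) is always freed last, so the walk always has a valid starting point. A sketch of the unlink with a pared-down vcpu type:

    struct vcpu_s {
        int id;
        struct vcpu_s *next_in_list;
    };

    /* Walk from the head (vcpu 0) and splice v out of the chain. */
    static void unlink_vcpu(struct vcpu_s *head, struct vcpu_s *v)
    {
        struct vcpu_s *p;
        for (p = head; p != NULL; p = p->next_in_list)
            if (p->next_in_list == v)
                p->next_in_list = v->next_in_list;
    }
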
 
diff -r 6c8c3df37bfe -r 87dec3b9c546 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Aug 19 15:21:12 2005
+++ b/xen/arch/x86/mm.c Fri Aug 19 15:22:05 2005
@@ -2633,14 +2633,16 @@
 
     if ( entries > FIRST_RESERVED_GDT_ENTRY )
         return -EINVAL;
-    
+
     shadow_sync_all(d);
 
     /* Check the pages in the new GDT. */
-    for ( i = 0; i < nr_pages; i++ )
-        if ( ((pfn = frames[i]) >= max_page) ||
-             !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
+    for ( i = 0; i < nr_pages; i++ ) {
+        pfn = frames[i];
+        if ((pfn >= max_page) ||
+            !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
             goto fail;
+    }
 
     /* Tear down the old GDT. */
     destroy_gdt(v);
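
The GDT bookkeeping here, like save_vcpu_context() in reboot.c above, assumes 512 descriptors per frame: each x86 descriptor is 8 bytes, so a 4096-byte page holds 4096 / 8 = 512 entries, hence the (gdt_ents + 511) / 512 round-up. A self-checking example of that arithmetic:

    #include <assert.h>

    static int gdt_pages(int gdt_ents)
    {
        return (gdt_ents + 511) / 512;
    }

    int main(void)
    {
        assert(gdt_pages(1)   == 1);   /* any non-empty GDT needs a frame */
        assert(gdt_pages(512) == 1);   /* exactly one full frame */
        assert(gdt_pages(513) == 2);   /* spills into a second frame */
        return 0;
    }
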
diff -r 6c8c3df37bfe -r 87dec3b9c546 xen/common/event_channel.c
--- a/xen/common/event_channel.c        Fri Aug 19 15:21:12 2005
+++ b/xen/common/event_channel.c        Fri Aug 19 15:22:05 2005
@@ -588,7 +588,6 @@
     long           rc = 0;
 
     if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] == NULL) ) {
-        printf("vcpu %d bad.\n", vcpu);
         return -EINVAL;
     }
 
@@ -596,7 +595,6 @@
 
     if ( !port_is_valid(d, port) )
     {
-        printf("port %d bad.\n", port);
         rc = -EINVAL;
         goto out;
     }
@@ -610,7 +608,6 @@
         chn->notify_vcpu_id = vcpu;
         break;
     default:
-        printf("evtchn type %d can't be rebound.\n", chn->state);
         rc = -EINVAL;
         break;
     }
diff -r 6c8c3df37bfe -r 87dec3b9c546 xen/common/schedule.c
--- a/xen/common/schedule.c     Fri Aug 19 15:21:12 2005
+++ b/xen/common/schedule.c     Fri Aug 19 15:22:05 2005
@@ -38,6 +38,8 @@
 #include <xen/mm.h>
 #include <public/sched_ctl.h>
 
+extern void arch_getdomaininfo_ctxt(struct vcpu *,
+                                    struct vcpu_guest_context *);
 /* opt_sched: scheduler - default to SEDF */
 static char opt_sched[10] = "sedf";
 string_param("sched", opt_sched);
@@ -82,7 +84,8 @@
     int i;
 
     SCHED_OP(free_task, d);
-    for (i = 0; i < MAX_VIRT_CPUS; i++)
+    /* vcpu 0 has to be the last one destructed. */
+    for (i = MAX_VIRT_CPUS-1; i >= 0; i--)
         if ( d->vcpu[i] )
             arch_free_vcpu_struct(d->vcpu[i]);
 
@@ -295,10 +298,36 @@
     return 0;
 }
 
+static long do_vcpu_pickle(int vcpu, unsigned long arg)
+{
+    struct vcpu *v;
+    vcpu_guest_context_t *c;
+    int ret = 0;
+
+    if (vcpu >= MAX_VIRT_CPUS)
+        return -EINVAL;
+    v = current->domain->vcpu[vcpu];
+    if (!v)
+        return -ESRCH;
+    /* Don't pickle vcpus which are currently running */
+    if (!test_bit(_VCPUF_down, &v->vcpu_flags)) {
+        return -EBUSY;
+    }
+    c = xmalloc(vcpu_guest_context_t);
+    if (!c)
+        return -ENOMEM;
+    arch_getdomaininfo_ctxt(v, c);
+    if (copy_to_user((vcpu_guest_context_t *)arg,
+                     (const vcpu_guest_context_t *)c, sizeof(*c)))
+        ret = -EFAULT;
+    xfree(c);
+    return ret;
+}
+
 /*
  * Demultiplex scheduler-related hypercalls.
  */
-long do_sched_op(unsigned long op)
+long do_sched_op(unsigned long op, unsigned long arg)
 {
     long ret = 0;
 
@@ -332,6 +361,11 @@
     case SCHEDOP_vcpu_up:
     {
         ret = do_vcpu_up((int)(op >> SCHEDOP_vcpushift));
+        break;
+    }
+    case SCHEDOP_vcpu_pickle:
+    {
+        ret = do_vcpu_pickle((int)(op >> SCHEDOP_vcpushift), arg);
         break;
     }
 
diff -r 6c8c3df37bfe -r 87dec3b9c546 xen/include/public/xen.h
--- a/xen/include/public/xen.h  Fri Aug 19 15:21:12 2005
+++ b/xen/include/public/xen.h  Fri Aug 19 15:22:05 2005
@@ -203,6 +203,7 @@
 #define SCHEDOP_shutdown        2   /* Stop executing this domain.        */
 #define SCHEDOP_vcpu_down       3   /* make target VCPU not-runnable.     */
 #define SCHEDOP_vcpu_up         4   /* make target VCPU runnable.         */
+#define SCHEDOP_vcpu_pickle     5   /* save a vcpu's context to memory.   */
 #define SCHEDOP_cmdmask       255   /* 8-bit command. */
 #define SCHEDOP_reasonshift     8   /* 8-bit reason code. (SCHEDOP_shutdown) */
 #define SCHEDOP_vcpushift       8   /* 8-bit VCPU target. (SCHEDOP_up|down) */
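
These SCHEDOP values pack an 8-bit command into the low byte of the hypercall's op word, with the target vcpu (or shutdown reason) in the next byte; do_sched_op() in schedule.c recovers both with the mask and shift defined here. For example:

    /* Encoding/decoding a SCHEDOP_vcpu_pickle op word, using the
       constants from this header. */
    #define SCHEDOP_vcpu_pickle  5
    #define SCHEDOP_cmdmask    255
    #define SCHEDOP_vcpushift    8

    unsigned long encode(int vcpu)
    {
        return SCHEDOP_vcpu_pickle | ((unsigned long)vcpu << SCHEDOP_vcpushift);
    }
    int cmd(unsigned long op)  { return op & SCHEDOP_cmdmask; }    /* dispatcher side */
    int vcpu_of(unsigned long op) { return (int)(op >> SCHEDOP_vcpushift); }
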
