
[Xen-changelog] New dom0_op to set max vcpus for a domain.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 52b9aca1916ace16c60955ee598bfc29ba45b9a6
# Parent  29db5bded574aa378a15f85a4ceb5653c3ed1837
New dom0_op to set max vcpus for a domain.

Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

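For reference, the new libxc call is meant to be issued by the toolstack after the domain exists but before VCPU0 is initialised; the DOM0_MAX_VCPUS handler below rejects the request once the domain is constructed. A minimal stand-alone sketch of such a caller follows. It assumes the plain-int xc_interface_open()/xc_interface_close() handle API of this libxc generation, and the domain id and VCPU count taken from the command line are purely illustrative.

/* Sketch only: raise the VCPU ceiling of an existing, not-yet-built domain.
 * Build against tools/libxc; the handle is the plain int returned by
 * xc_interface_open() in this era of libxc. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "xenctrl.h"

int main(int argc, char **argv)
{
    int xc_handle;
    uint32_t domid;
    unsigned int max_vcpus;

    if ( argc != 3 )
    {
        fprintf(stderr, "usage: %s <domid> <max_vcpus>\n", argv[0]);
        return 1;
    }

    domid     = (uint32_t)strtoul(argv[1], NULL, 0);
    max_vcpus = (unsigned int)strtoul(argv[2], NULL, 0);

    if ( (xc_handle = xc_interface_open()) < 0 )
    {
        perror("xc_interface_open");
        return 1;
    }

    /* Issues DOM0_MAX_VCPUS: Xen allocates d->vcpu[0..max-1], and fails
     * with -EINVAL if max exceeds MAX_VIRT_CPUS or the domain's VCPU0 is
     * already marked _VCPUF_initialised. */
    if ( xc_domain_max_vcpus(xc_handle, domid, max_vcpus) != 0 )
    {
        perror("xc_domain_max_vcpus");
        xc_interface_close(xc_handle);
        return 1;
    }

    xc_interface_close(xc_handle);
    return 0;
}

This mirrors what the XendDomainInfo.py hunk below does through the Python binding: xc.domain_max_vcpus() is called with the configured 'vcpus' count while the domain is still paused under construction.
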
diff -r 29db5bded574 -r 52b9aca1916a tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Wed Oct 12 15:15:02 2005
+++ b/tools/libxc/xc_domain.c   Wed Oct 12 15:34:49 2005
@@ -329,6 +329,15 @@
     return err;
 }
 
+int xc_domain_max_vcpus(int xc_handle, uint32_t domid, unsigned int max)
+{
+    dom0_op_t op;
+    op.cmd = DOM0_MAX_VCPUS;
+    op.u.max_vcpus.domain = (domid_t)domid;
+    op.u.max_vcpus.max    = max;
+    return do_dom0_op(xc_handle, &op);
+}
+
 /*
  * Local variables:
  * mode: C
diff -r 29db5bded574 -r 52b9aca1916a tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Wed Oct 12 15:15:02 2005
+++ b/tools/libxc/xenctrl.h     Wed Oct 12 15:34:49 2005
@@ -147,6 +147,17 @@
                        uint32_t domid,
                        const char *corename);
 
+/*
+ * This function sets the maximum number of vcpus that a domain may create.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface.
+ * @parm domid the domain id in which vcpus are to be created.
+ * @parm max the maximum number of vcpus that the domain may create.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_max_vcpus(int xc_handle,
+                        uint32_t domid, 
+                        unsigned int max);
 
 /**
  * This function pauses a domain. A paused domain still exists in memory
diff -r 29db5bded574 -r 52b9aca1916a tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Wed Oct 12 15:15:02 2005
+++ b/tools/python/xen/lowlevel/xc/xc.c Wed Oct 12 15:34:49 2005
@@ -91,6 +91,26 @@
         return PyErr_SetFromErrno(xc_error);
 
     return PyInt_FromLong(dom);
+}
+
+static PyObject *pyxc_domain_max_vcpus(PyObject *self,
+                                            PyObject *args,
+                                            PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+
+    uint32_t dom, max;
+
+    static char *kwd_list[] = { "dom", "max", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list, &dom, &max) )
+        return NULL;
+
+    if ( xc_domain_max_vcpus(xc->xc_handle, dom, max) != 0 )
+        return PyErr_SetFromErrno(xc_error);
+    
+    Py_INCREF(zero);
+    return zero;
 }
 
 static PyObject *pyxc_domain_pause(PyObject *self,
@@ -783,6 +803,14 @@
       " dom    [int, 0]:        Domain identifier to use (allocated if 
zero).\n"
       "Returns: [int] new domain identifier; -1 on error.\n" },
 
+    { "domain_max_vcpus", 
+      (PyCFunction)pyxc_domain_max_vcpus,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Set the maximum number of VCPUs a domain may create.\n"
+      " dom       [int, 0]:      Domain identifier to use.\n"
+      " max     [int, 0]:      New maximum number of VCPUs in domain.\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
     { "domain_dumpcore", 
       (PyCFunction)pyxc_domain_dumpcore, 
       METH_VARARGS | METH_KEYWORDS, "\n"
diff -r 29db5bded574 -r 52b9aca1916a tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Wed Oct 12 15:15:02 2005
+++ b/tools/python/xen/xend/XendDomainInfo.py   Wed Oct 12 15:34:49 2005
@@ -1030,6 +1030,10 @@
             self.image.handleBootloading()
 
         xc.domain_setcpuweight(self.domid, self.info['cpu_weight'])
+
+        # Set maximum number of vcpus in domain
+        xc.domain_max_vcpus(self.domid, int(self.info['vcpus']))
+
         # XXX Merge with configure_maxmem?
         m = self.image.getDomainMemory(self.info['memory_KiB'])
         xc.domain_setmaxmem(self.domid, m)
diff -r 29db5bded574 -r 52b9aca1916a xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Wed Oct 12 15:15:02 2005
+++ b/xen/arch/ia64/xen/domain.c        Wed Oct 12 15:34:49 2005
@@ -149,14 +149,24 @@
        continue_cpu_idle_loop();
 }
 
-struct vcpu *arch_alloc_vcpu_struct(void)
-{
-       /* Per-vp stack is used here. So we need keep vcpu
-        * same page as per-vp stack */
-       return alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER);
-}
-
-void arch_free_vcpu_struct(struct vcpu *v)
+struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
+{
+       struct vcpu *v;
+
+       if ((v = alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER)) == NULL)
+               return NULL;
+
+       memset(v, 0, sizeof(*v)); 
+        memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
+       v->arch.privregs = 
+               alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+       printf("arch_vcpu_info=%p\n", v->arch.privregs);
+       memset(v->arch.privregs, 0, PAGE_SIZE);
+
+       return v;
+}
+
+void free_vcpu_struct(struct vcpu *v)
 {
        free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
 }
@@ -194,12 +204,6 @@
                while (1);
        }
        memset(d->shared_info, 0, PAGE_SIZE);
-#if 0
-       d->vcpu[0].arch.privregs = 
-                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
-       printf("arch_vcpu_info=%p\n", d->vcpu[0].arch.privregs);
-       memset(d->vcpu.arch.privregs, 0, PAGE_SIZE);
-#endif
        v->vcpu_info = &(d->shared_info->vcpu_data[0]);
 
        d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
@@ -249,13 +253,6 @@
        printf("arch_getdomaininfo_ctxt\n");
        c->regs = *regs;
        c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector;
-#if 0
-       if (c->vcpu.privregs && copy_to_user(c->vcpu.privregs,
-                       v->vcpu_info->arch.privregs, sizeof(mapped_regs_t))) {
-               printk("Bad ctxt address: 0x%lx\n", c->vcpu.privregs);
-               return -EFAULT;
-       }
-#endif
 
        c->shared = v->domain->shared_info->arch;
 }
@@ -279,12 +276,7 @@
 
            vmx_setup_platform(v, c);
        }
-    else{
-       v->arch.privregs =
-                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
-           printf("arch_vcpu_info=%p\n", v->arch.privregs);
-       memset(v->arch.privregs, 0, PAGE_SIZE);
-    }
+
        *regs = c->regs;
        new_thread(v, regs->cr_iip, 0, 0);
 
@@ -302,18 +294,6 @@
        /* Don't redo final setup */
        set_bit(_VCPUF_initialised, &v->vcpu_flags);
        return 0;
-}
-
-void arch_do_boot_vcpu(struct vcpu *v)
-{
-       struct domain *d = v->domain;
-       printf("arch_do_boot_vcpu: not implemented\n");
-
-       d->vcpu[v->vcpu_id]->arch.privregs = 
-                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
-       printf("arch_vcpu_info=%p\n", d->vcpu[v->vcpu_id]->arch.privregs);
-       memset(d->vcpu[v->vcpu_id]->arch.privregs, 0, PAGE_SIZE);
-       return;
 }
 
 void domain_relinquish_resources(struct domain *d)
@@ -824,12 +804,12 @@
        unsigned long ret, progress = 0;
 
 //printf("construct_dom0: starting\n");
+
+#ifndef CLONE_DOMAIN0
        /* Sanity! */
-#ifndef CLONE_DOMAIN0
-       if ( d != dom0 ) 
-           BUG();
-       if ( test_bit(_DOMF_constructed, &d->domain_flags) ) 
-           BUG();
+       BUG_ON(d != dom0);
+       BUG_ON(d->vcpu[0] == NULL);
+       BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
 #endif
 
        memset(&dsi, 0, sizeof(struct domain_setup_info));
@@ -992,14 +972,8 @@
        printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
        if (vmx_dom0)
            vmx_final_setup_domain(dom0);
-    else{
-       d->vcpu[0]->arch.privregs = 
-                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
-           printf("arch_vcpu_info=%p\n", d->vcpu[0]->arch.privregs);
-       memset(d->vcpu[0]->arch.privregs, 0, PAGE_SIZE);
-    }
-
-       set_bit(_DOMF_constructed, &d->domain_flags);
+
+       set_bit(_VCPUF_initialised, &v->vcpu_flags);
 
        new_thread(v, pkern_entry, 0, 0);
        physdev_init_dom0(d);
@@ -1031,7 +1005,7 @@
        unsigned long pkern_entry;
 
 #ifndef DOMU_AUTO_RESTART
-       if ( test_bit(_DOMF_constructed, &d->domain_flags) ) BUG();
+       BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
 #endif
 
        printk("*** LOADING DOMAIN %d ***\n",d->domain_id);
@@ -1051,7 +1025,7 @@
        loaddomainelfimage(d,image_start);
        printk("loaddomainelfimage returns\n");
 
-       set_bit(_DOMF_constructed, &d->domain_flags);
+       set_bit(_VCPUF_initialised, &v->vcpu_flags);
 
        printk("calling new_thread, entry=%p\n",pkern_entry);
 #ifdef DOMU_AUTO_RESTART
diff -r 29db5bded574 -r 52b9aca1916a xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Oct 12 15:15:02 2005
+++ b/xen/arch/x86/domain.c     Wed Oct 12 15:34:49 2005
@@ -212,21 +212,35 @@
            page->u.inuse.type_info);
 }
 
-struct vcpu *arch_alloc_vcpu_struct(void)
-{
-    return xmalloc(struct vcpu);
-}
-
-/* We assume that vcpu 0 is always the last one to be freed in a
-   domain i.e. if v->vcpu_id == 0, the domain should be
-   single-processor. */
-void arch_free_vcpu_struct(struct vcpu *v)
-{
-    struct vcpu *p;
-    for_each_vcpu(v->domain, p) {
-        if (p->next_in_list == v)
-            p->next_in_list = v->next_in_list;
-    }
+struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
+{
+    struct vcpu *v;
+
+    if ( (v = xmalloc(struct vcpu)) == NULL )
+        return NULL;
+
+    memset(v, 0, sizeof(*v));
+
+    memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
+    v->arch.flags = TF_kernel_mode;
+
+    if ( (v->vcpu_id = vcpu_id) != 0 )
+    {
+        v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail;
+        v->arch.perdomain_ptes =
+            d->arch.mm_perdomain_pt + (vcpu_id << PDPT_VCPU_SHIFT);
+        v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
+            l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+    }
+
+    return v;
+}
+
+void free_vcpu_struct(struct vcpu *v)
+{
+    BUG_ON(v->next_in_list != NULL);
+    if ( v->vcpu_id != 0 )
+        v->domain->vcpu[v->vcpu_id - 1]->next_in_list = NULL;
     xfree(v);
 }
 
@@ -242,8 +256,6 @@
 void arch_do_createdomain(struct vcpu *v)
 {
     struct domain *d = v->domain;
-
-    v->arch.flags = TF_kernel_mode;
 
     if ( is_idle_task(d) )
         return;
@@ -291,20 +303,6 @@
     INIT_LIST_HEAD(&d->arch.free_shadow_frames);
 }
 
-void arch_do_boot_vcpu(struct vcpu *v)
-{
-    struct domain *d = v->domain;
-
-    v->arch.flags = TF_kernel_mode;
-
-    v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail;
-
-    v->arch.perdomain_ptes =
-        d->arch.mm_perdomain_pt + (v->vcpu_id << PDPT_VCPU_SHIFT);
-    v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
-        l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
-}
-
 void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
 {
     if ( v->processor == newcpu )
diff -r 29db5bded574 -r 52b9aca1916a xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Wed Oct 12 15:15:02 2005
+++ b/xen/arch/x86/domain_build.c       Wed Oct 12 15:34:49 2005
@@ -14,6 +14,7 @@
 #include <xen/event.h>
 #include <xen/elf.h>
 #include <xen/kernel.h>
+#include <xen/domain.h>
 #include <asm/regs.h>
 #include <asm/system.h>
 #include <asm/io.h>
@@ -146,10 +147,9 @@
         struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn);
 
     /* Sanity! */
-    if ( d->domain_id != 0 ) 
-        BUG();
-    if ( test_bit(_DOMF_constructed, &d->domain_flags) ) 
-        BUG();
+    BUG_ON(d->domain_id != 0);
+    BUG_ON(d->vcpu[0] == NULL);
+    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
 
     memset(&dsi, 0, sizeof(struct domain_setup_info));
     dsi.image_addr = (unsigned long)image_start;
@@ -559,6 +559,9 @@
         d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
     d->shared_info->n_vcpu = num_online_cpus();
 
+    for ( i = 1; i < d->shared_info->n_vcpu; i++ )
+        (void)alloc_vcpu(d, i);
+
     /* Set up monitor table */
     update_pagetables(v);
 
@@ -657,7 +660,7 @@
 
     init_domain_time(d);
 
-    set_bit(_DOMF_constructed, &d->domain_flags);
+    set_bit(_VCPUF_initialised, &v->vcpu_flags);
 
     new_thread(v, dsi.v_kernentry, vstack_end, vstartinfo_start);
 
diff -r 29db5bded574 -r 52b9aca1916a xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Oct 12 15:15:02 2005
+++ b/xen/arch/x86/mm.c Wed Oct 12 15:34:49 2005
@@ -185,7 +185,7 @@
      * Any Xen-heap pages that we will allow to be mapped will have
      * their domain field set to dom_xen.
      */
-    dom_xen = alloc_domain_struct();
+    dom_xen = alloc_domain();
     atomic_set(&dom_xen->refcnt, 1);
     dom_xen->domain_id = DOMID_XEN;
 
@@ -194,7 +194,7 @@
      * This domain owns I/O pages that are within the range of the pfn_info
      * array. Mappings occur at the priv of the caller.
      */
-    dom_io = alloc_domain_struct();
+    dom_io = alloc_domain();
     atomic_set(&dom_io->refcnt, 1);
     dom_io->domain_id = DOMID_IO;
 
diff -r 29db5bded574 -r 52b9aca1916a xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Wed Oct 12 15:15:02 2005
+++ b/xen/arch/x86/vmx.c        Wed Oct 12 15:34:49 2005
@@ -1674,7 +1674,7 @@
             store_cpu_user_regs(&regs);
             __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, PENDING_DEBUG_EXC_BS);
 
-            set_bit(_VCPUF_ctrl_pause, &current->vcpu_flags);
+            domain_pause_for_debugger();
             do_sched_op(SCHEDOP_yield);
 
             break;
diff -r 29db5bded574 -r 52b9aca1916a xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c     Wed Oct 12 15:15:02 2005
+++ b/xen/common/dom0_ops.c     Wed Oct 12 15:34:49 2005
@@ -44,28 +44,24 @@
     struct vcpu   *v;
     u64 cpu_time = 0;
     int vcpu_count = 0;
-    int flags = DOMFLAGS_PAUSED | DOMFLAGS_BLOCKED;
+    int flags = DOMFLAGS_BLOCKED;
     
     info->domain = d->domain_id;
     
     memset(&info->vcpu_to_cpu, -1, sizeof(info->vcpu_to_cpu));
     memset(&info->cpumap, 0, sizeof(info->cpumap));
-    
+
     /* 
-     * - domain is marked as paused or blocked only if all its vcpus 
-     *   are paused or blocked 
+     * - domain is marked as blocked only if all its vcpus are blocked
      * - domain is marked as running if any of its vcpus is running
      * - only map vcpus that aren't down.  Note, at some point we may
      *   wish to demux the -1 value to indicate down vs. not-ever-booted
-     *   
      */
     for_each_vcpu ( d, v ) {
         /* only map vcpus that are up */
         if ( !(test_bit(_VCPUF_down, &v->vcpu_flags)) )
             info->vcpu_to_cpu[v->vcpu_id] = v->processor;
         info->cpumap[v->vcpu_id] = v->cpumap;
-        if ( !(v->vcpu_flags & VCPUF_ctrl_pause) )
-            flags &= ~DOMFLAGS_PAUSED;
         if ( !(v->vcpu_flags & VCPUF_blocked) )
             flags &= ~DOMFLAGS_BLOCKED;
         if ( v->vcpu_flags & VCPUF_running )
@@ -78,8 +74,9 @@
     info->n_vcpu = vcpu_count;
     
     info->flags = flags |
-        ((d->domain_flags & DOMF_dying)    ? DOMFLAGS_DYING    : 0) |
-        ((d->domain_flags & DOMF_shutdown) ? DOMFLAGS_SHUTDOWN : 0) |
+        ((d->domain_flags & DOMF_dying)      ? DOMFLAGS_DYING    : 0) |
+        ((d->domain_flags & DOMF_shutdown)   ? DOMFLAGS_SHUTDOWN : 0) |
+        ((d->domain_flags & DOMF_ctrl_pause) ? DOMFLAGS_PAUSED   : 0) |
         d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
 
     if (d->ssid != NULL)
@@ -97,6 +94,7 @@
     long ret = 0;
     dom0_op_t curop, *op = &curop;
     void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
+    static spinlock_t dom0_lock = SPIN_LOCK_UNLOCKED;
 
     if ( !IS_PRIV(current->domain) )
         return -EPERM;
@@ -109,6 +107,8 @@
 
     if ( acm_pre_dom0_op(op, &ssid) )
         return -EACCES;
+
+    spin_lock(&dom0_lock);
 
     switch ( op->cmd )
     {
@@ -150,7 +150,7 @@
         {
             ret = -EINVAL;
             if ( (d != current->domain) && 
-                 test_bit(_DOMF_constructed, &d->domain_flags) )
+                 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
             {
                 domain_unpause_by_systemcontroller(d);
                 ret = 0;
@@ -167,17 +167,14 @@
         domid_t        dom;
         struct vcpu   *v;
         unsigned int   i, cnt[NR_CPUS] = { 0 };
-        static spinlock_t alloc_lock = SPIN_LOCK_UNLOCKED;
         static domid_t rover = 0;
-
-        spin_lock(&alloc_lock);
 
         dom = op->u.createdomain.domain;
         if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
         {
             ret = -EINVAL;
             if ( !is_free_domid(dom) )
-                goto alloc_out;
+                break;
         }
         else
         {
@@ -191,7 +188,7 @@
 
             ret = -ENOMEM;
             if ( dom == rover )
-                goto alloc_out;
+                break;
 
             rover = dom;
         }
@@ -215,15 +212,51 @@
 
         ret = -ENOMEM;
         if ( (d = do_createdomain(dom, pro)) == NULL )
-            goto alloc_out;
+            break;
 
         ret = 0;
 
         op->u.createdomain.domain = d->domain_id;
         copy_to_user(u_dom0_op, op, sizeof(*op));
-
-    alloc_out:
-        spin_unlock(&alloc_lock);
+    }
+    break;
+
+    case DOM0_MAX_VCPUS:
+    {
+        struct domain *d;
+        unsigned int i, max = op->u.max_vcpus.max;
+
+        ret = -EINVAL;
+        if ( max > MAX_VIRT_CPUS )
+            break;
+
+        ret = -ESRCH;
+        if ( (d = find_domain_by_id(op->u.max_vcpus.domain)) == NULL )
+            break;
+
+        /*
+         * Can only create new VCPUs while the domain is not fully constructed
+         * (and hence not runnable). Xen needs auditing for races before
+         * removing this check.
+         */
+        ret = -EINVAL;
+        if ( test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
+            goto maxvcpu_out;
+
+        /* We cannot reduce maximum VCPUs. */
+        ret = -EINVAL;
+        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
+            goto maxvcpu_out;
+
+        ret = -ENOMEM;
+        for ( i = 0; i < max; i++ )
+            if ( (d->vcpu[i] == NULL) && (alloc_vcpu(d, i) == NULL) )
+                goto maxvcpu_out;
+
+        ret = 0;
+
+    maxvcpu_out:
+        put_domain(d);
     }
     break;
 
@@ -535,10 +568,14 @@
         ret = arch_do_dom0_op(op,u_dom0_op);
 
     }
+
+    spin_unlock(&dom0_lock);
+
     if (!ret)
         acm_post_dom0_op(op, ssid);
     else
         acm_fail_dom0_op(op, ssid);
+
     return ret;
 }
 
diff -r 29db5bded574 -r 52b9aca1916a xen/common/domain.c
--- a/xen/common/domain.c       Wed Oct 12 15:15:02 2005
+++ b/xen/common/domain.c       Wed Oct 12 15:34:49 2005
@@ -33,7 +33,7 @@
     struct domain *d, **pd;
     struct vcpu *v;
 
-    if ( (d = alloc_domain_struct()) == NULL )
+    if ( (d = alloc_domain()) == NULL )
         return NULL;
 
     v = d->vcpu[0];
@@ -52,12 +52,14 @@
 
     if ( d->domain_id == IDLE_DOMAIN_ID )
         set_bit(_DOMF_idle_domain, &d->domain_flags);
+    else
+        set_bit(_DOMF_ctrl_pause, &d->domain_flags);
 
     if ( !is_idle_task(d) &&
          ((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) )
     {
         evtchn_destroy(d);
-        free_domain_struct(d);
+        free_domain(d);
         return NULL;
     }
     
@@ -224,11 +226,9 @@
      * must issue a PAUSEDOMAIN command to ensure that all execution
      * has ceased and guest state is committed to memory.
      */
+    set_bit(_DOMF_ctrl_pause, &d->domain_flags);
     for_each_vcpu ( d, v )
-    {
-        set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
         vcpu_sleep_nosync(v);
-    }
 
     send_guest_virq(dom0->vcpu[0], VIRQ_DEBUGGER);
 }
@@ -267,7 +267,7 @@
     free_perdomain_pt(d);
     free_xenheap_page(d->shared_info);
 
-    free_domain_struct(d);
+    free_domain(d);
 
     send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
 }
@@ -310,10 +310,11 @@
 {
     struct vcpu *v;
 
-    for_each_vcpu ( d, v )
-    {
-        BUG_ON(v == current);
-        if ( !test_and_set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
+    BUG_ON(current->domain == d);
+
+    if ( !test_and_set_bit(_DOMF_ctrl_pause, &d->domain_flags) )
+    {
+        for_each_vcpu ( d, v )
             vcpu_sleep_sync(v);
     }
 }
@@ -322,9 +323,9 @@
 {
     struct vcpu *v;
 
-    for_each_vcpu ( d, v )
-    {
-        if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
+    if ( test_and_clear_bit(_DOMF_ctrl_pause, &d->domain_flags) )
+    {
+        for_each_vcpu ( d, v )
             vcpu_wake(v);
     }
 }
@@ -345,61 +346,32 @@
     if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
         return -EINVAL;
     
-    if (test_bit(_DOMF_constructed, &d->domain_flags) && 
-        !test_bit(_VCPUF_ctrl_pause, &v->vcpu_flags))
+    if ( !test_bit(_DOMF_ctrl_pause, &d->domain_flags) )
         return -EINVAL;
 
     if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
         return -ENOMEM;
 
-    if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) )
-    {
-        rc = -EFAULT;
-        goto out;
-    }
-    
-    if ( (rc = arch_set_info_guest(v, c)) != 0 )
-        goto out;
-
-    set_bit(_DOMF_constructed, &d->domain_flags);
-
- out:    
+    rc = -EFAULT;
+    if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) == 0 )
+        rc = arch_set_info_guest(v, c);
+
     xfree(c);
     return rc;
 }
 
 int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt) 
 {
-    struct vcpu *v;
+    struct vcpu *v = d->vcpu[vcpuid];
     int rc;
 
-    ASSERT(d->vcpu[vcpuid] == NULL);
-
-    if ( alloc_vcpu_struct(d, vcpuid) == NULL )
-        return -ENOMEM;
-
-    v = d->vcpu[vcpuid];
-
-    atomic_set(&v->pausecnt, 0);
-    v->cpumap = CPUMAP_RUNANYWHERE;
-
-    memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
-
-    arch_do_boot_vcpu(v);
+    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
 
     if ( (rc = arch_set_info_guest(v, ctxt)) != 0 )
-        goto out;
+        return rc;
 
     sched_add_domain(v);
 
-    set_bit(_VCPUF_down, &v->vcpu_flags);
-    clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
-
-    return 0;
-
- out:
-    arch_free_vcpu_struct(d->vcpu[vcpuid]);
-    d->vcpu[vcpuid] = NULL;
     return rc;
 }
 
@@ -413,7 +385,7 @@
     if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
         return -EINVAL;
 
-    if ( ((v = d->vcpu[vcpuid]) == NULL) && (cmd != VCPUOP_initialise) )
+    if ( (v = d->vcpu[vcpuid]) == NULL )
         return -ENOENT;
 
     switch ( cmd )
@@ -433,7 +405,9 @@
         }
 
         LOCK_BIGLOCK(d);
-        rc = (d->vcpu[vcpuid] == NULL) ? boot_vcpu(d, vcpuid, ctxt) : -EEXIST;
+        rc = -EEXIST;
+        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+            rc = boot_vcpu(d, vcpuid, ctxt);
         UNLOCK_BIGLOCK(d);
 
         xfree(ctxt);
diff -r 29db5bded574 -r 52b9aca1916a xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Wed Oct 12 15:15:02 2005
+++ b/xen/common/sched_sedf.c   Wed Oct 12 15:34:49 2005
@@ -1122,10 +1122,10 @@
 void sedf_wake(struct vcpu *d) {
     s_time_t              now = NOW();
     struct sedf_vcpu_info* inf = EDOM_INFO(d);
- 
+
     PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id,
           d->vcpu_id);
- 
+
     if (unlikely(is_idle_task(d->domain)))
         return;
    
diff -r 29db5bded574 -r 52b9aca1916a xen/common/schedule.c
--- a/xen/common/schedule.c     Wed Oct 12 15:15:02 2005
+++ b/xen/common/schedule.c     Wed Oct 12 15:34:49 2005
@@ -80,69 +80,59 @@
 /* Per-CPU periodic timer sends an event to the currently-executing domain. */
 static struct ac_timer t_timer[NR_CPUS]; 
 
-void free_domain_struct(struct domain *d)
+void free_domain(struct domain *d)
 {
     int i;
 
     SCHED_OP(free_task, d);
-    /* vcpu 0 has to be the last one destructed. */
-    for (i = MAX_VIRT_CPUS-1; i >= 0; i--)
-        if ( d->vcpu[i] )
-            arch_free_vcpu_struct(d->vcpu[i]);
+
+    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
+        if ( d->vcpu[i] != NULL )
+            free_vcpu_struct(d->vcpu[i]);
 
     xfree(d);
 }
 
-struct vcpu *alloc_vcpu_struct(
-    struct domain *d, unsigned long vcpu)
-{
-    struct vcpu *v, *vc;
-
-    ASSERT( d->vcpu[vcpu] == NULL );
-
-    if ( (v = arch_alloc_vcpu_struct()) == NULL )
+struct vcpu *alloc_vcpu(struct domain *d, unsigned int vcpu_id)
+{
+    struct vcpu *v;
+
+    BUG_ON(d->vcpu[vcpu_id] != NULL);
+
+    if ( (v = alloc_vcpu_struct(d, vcpu_id)) == NULL )
         return NULL;
 
-    memset(v, 0, sizeof(*v));
-
-    d->vcpu[vcpu] = v;
     v->domain = d;
-    v->vcpu_id = vcpu;
+    v->vcpu_id = vcpu_id;
+    atomic_set(&v->pausecnt, 0);
+    v->cpumap = CPUMAP_RUNANYWHERE;
+
+    d->vcpu[vcpu_id] = v;
 
     if ( SCHED_OP(alloc_task, v) < 0 )
-        goto out;
-
-    if ( vcpu != 0 )
-    {
-        v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
-
-        for_each_vcpu( d, vc )
-        {
-            if ( (vc->next_in_list == NULL) ||
-                 (vc->next_in_list->vcpu_id > vcpu) )
-                break;
-        }
-        v->next_in_list  = vc->next_in_list;
-        vc->next_in_list = v;
-
-        if (test_bit(_VCPUF_cpu_pinned, &vc->vcpu_flags)) {
-            v->processor = (vc->processor + 1) % num_online_cpus();
-            set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
-        } else {
-            v->processor = (vc->processor + 1) % num_online_cpus();
-        }
-    }
+    {
+        d->vcpu[vcpu_id] = NULL;
+        free_vcpu_struct(v);
+        return NULL;
+    }
+
+    if ( vcpu_id == 0 )
+        return v;
+
+    v->vcpu_info = &d->shared_info->vcpu_data[vcpu_id];
+
+    d->vcpu[v->vcpu_id-1]->next_in_list = v;
+
+    v->processor = (d->vcpu[0]->processor + 1) % num_online_cpus();
+    if ( test_bit(_VCPUF_cpu_pinned, &d->vcpu[0]->vcpu_flags) )
+        set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
+
+    set_bit(_VCPUF_down, &v->vcpu_flags);
 
     return v;
-
- out:
-    d->vcpu[vcpu] = NULL;
-    arch_free_vcpu_struct(v);
-
-    return NULL;
-}
-
-struct domain *alloc_domain_struct(void)
+}
+
+struct domain *alloc_domain(void)
 {
     struct domain *d;
 
@@ -151,7 +141,7 @@
     
     memset(d, 0, sizeof(*d));
 
-    if ( alloc_vcpu_struct(d, 0) == NULL )
+    if ( alloc_vcpu(d, 0) == NULL )
         goto out;
 
     return d;
@@ -176,11 +166,6 @@
         schedule_data[v->processor].curr = v;
         schedule_data[v->processor].idle = v;
         set_bit(_VCPUF_running, &v->vcpu_flags);
-    }
-    else
-    {
-        /* Must be unpaused by control software to start execution. */
-        set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
     }
 
     SCHED_OP(add_task, v);
diff -r 29db5bded574 -r 52b9aca1916a xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h     Wed Oct 12 15:15:02 2005
+++ b/xen/include/public/dom0_ops.h     Wed Oct 12 15:34:49 2005
@@ -386,6 +386,13 @@
         int is_ram;
     } *memory_map;
 } dom0_physical_memory_map_t;
+
+#define DOM0_MAX_VCPUS 41
+typedef struct {
+    domid_t domain;             /* domain to be affected */
+    unsigned int max;           /* maximum number of vcpus */
+} dom0_max_vcpus_t;
+
 
 typedef struct {
     uint32_t cmd;
@@ -422,6 +429,7 @@
         dom0_getdomaininfolist_t getdomaininfolist;
         dom0_platform_quirk_t    platform_quirk;
         dom0_physical_memory_map_t physical_memory_map;
+        dom0_max_vcpus_t         max_vcpus;
     } u;
 } dom0_op_t;
 
diff -r 29db5bded574 -r 52b9aca1916a xen/include/xen/domain.h
--- a/xen/include/xen/domain.h  Wed Oct 12 15:15:02 2005
+++ b/xen/include/xen/domain.h  Wed Oct 12 15:34:49 2005
@@ -1,18 +1,19 @@
 
 #ifndef __XEN_DOMAIN_H__
 #define __XEN_DOMAIN_H__
+
+extern int boot_vcpu(
+    struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt);
 
 /*
  * Arch-specifics.
  */
 
-struct vcpu *arch_alloc_vcpu_struct(void);
+struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id);
 
-extern void arch_free_vcpu_struct(struct vcpu *v);
+extern void free_vcpu_struct(struct vcpu *v);
 
 extern void arch_do_createdomain(struct vcpu *v);
-
-extern void arch_do_boot_vcpu(struct vcpu *v);
 
 extern int  arch_set_info_guest(
     struct vcpu *v, struct vcpu_guest_context *c);
diff -r 29db5bded574 -r 52b9aca1916a xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Wed Oct 12 15:15:02 2005
+++ b/xen/include/xen/sched.h   Wed Oct 12 15:34:49 2005
@@ -61,7 +61,8 @@
     vcpu_info_t     *vcpu_info;
 
     struct domain   *domain;
-    struct vcpu *next_in_list;
+
+    struct vcpu     *next_in_list;
 
     struct ac_timer  timer;         /* one-shot timer for timeout values */
     unsigned long    sleep_tick;    /* tick at which this vcpu started sleep */
@@ -166,11 +167,10 @@
 #define IDLE_DOMAIN_ID   (0x7FFFU)
 #define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
 
-struct vcpu *alloc_vcpu_struct(struct domain *d,
-                                             unsigned long vcpu);
-
-void free_domain_struct(struct domain *d);
-struct domain *alloc_domain_struct();
+struct vcpu *alloc_vcpu(struct domain *d, unsigned int vcpu_id);
+
+struct domain *alloc_domain(void);
+void free_domain(struct domain *d);
 
 #define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
 #define put_domain(_d) \
@@ -327,13 +327,15 @@
 extern struct domain *domain_hash[DOMAIN_HASH_SIZE];
 extern struct domain *domain_list;
 
-#define for_each_domain(_d) \
- for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_in_list )
-
-#define for_each_vcpu(_d,_ed) \
- for ( (_ed) = (_d)->vcpu[0]; \
-       (_ed) != NULL;                \
-       (_ed) = (_ed)->next_in_list )
+#define for_each_domain(_d)                     \
+ for ( (_d) = domain_list;                      \
+       (_d) != NULL;                            \
+       (_d) = (_d)->next_in_list )
+
+#define for_each_vcpu(_d,_v)                    \
+ for ( (_v) = (_d)->vcpu[0];                    \
+       (_v) != NULL;                            \
+       (_v) = (_v)->next_in_list )
 
 /*
  * Per-VCPU flags (vcpu_flags).
@@ -345,57 +347,55 @@
 #define _VCPUF_fpu_dirtied     1
 #define VCPUF_fpu_dirtied      (1UL<<_VCPUF_fpu_dirtied)
  /* Domain is blocked waiting for an event. */
-#define _VCPUF_blocked         3
+#define _VCPUF_blocked         2
 #define VCPUF_blocked          (1UL<<_VCPUF_blocked)
- /* Domain is paused by controller software. */
-#define _VCPUF_ctrl_pause      4
-#define VCPUF_ctrl_pause       (1UL<<_VCPUF_ctrl_pause)
  /* Currently running on a CPU? */
-#define _VCPUF_running         5
+#define _VCPUF_running         3
 #define VCPUF_running          (1UL<<_VCPUF_running)
  /* Disables auto-migration between CPUs. */
-#define _VCPUF_cpu_pinned      6
+#define _VCPUF_cpu_pinned      4
 #define VCPUF_cpu_pinned       (1UL<<_VCPUF_cpu_pinned)
  /* Domain migrated between CPUs. */
-#define _VCPUF_cpu_migrated    7
+#define _VCPUF_cpu_migrated    5
 #define VCPUF_cpu_migrated     (1UL<<_VCPUF_cpu_migrated)
  /* Initialization completed. */
-#define _VCPUF_initialised     8
+#define _VCPUF_initialised     6
 #define VCPUF_initialised      (1UL<<_VCPUF_initialised)
  /* VCPU is not-runnable */
-#define _VCPUF_down            9
+#define _VCPUF_down            7
 #define VCPUF_down             (1UL<<_VCPUF_down)
 
 /*
  * Per-domain flags (domain_flags).
  */
- /* Has the guest OS been fully built yet? */
-#define _DOMF_constructed      0
-#define DOMF_constructed       (1UL<<_DOMF_constructed)
  /* Is this one of the per-CPU idle domains? */
-#define _DOMF_idle_domain      1
+#define _DOMF_idle_domain      0
 #define DOMF_idle_domain       (1UL<<_DOMF_idle_domain)
  /* Is this domain privileged? */
-#define _DOMF_privileged       2
+#define _DOMF_privileged       1
 #define DOMF_privileged        (1UL<<_DOMF_privileged)
  /* May this domain do IO to physical devices? */
-#define _DOMF_physdev_access   3
+#define _DOMF_physdev_access   2
 #define DOMF_physdev_access    (1UL<<_DOMF_physdev_access)
  /* Guest shut itself down for some reason. */
-#define _DOMF_shutdown         4
+#define _DOMF_shutdown         3
 #define DOMF_shutdown          (1UL<<_DOMF_shutdown)
  /* Guest is in process of shutting itself down (becomes DOMF_shutdown). */
-#define _DOMF_shuttingdown     5
+#define _DOMF_shuttingdown     4
 #define DOMF_shuttingdown      (1UL<<_DOMF_shuttingdown)
  /* Death rattle. */
-#define _DOMF_dying            6
+#define _DOMF_dying            5
 #define DOMF_dying             (1UL<<_DOMF_dying)
+ /* Domain is paused by controller software. */
+#define _DOMF_ctrl_pause       6
+#define DOMF_ctrl_pause        (1UL<<_DOMF_ctrl_pause)
 
 static inline int domain_runnable(struct vcpu *v)
 {
     return ( (atomic_read(&v->pausecnt) == 0) &&
-             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause|VCPUF_down)) &&
-             !(v->domain->domain_flags & (DOMF_shutdown|DOMF_shuttingdown)) );
+             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_down)) &&
+             !(v->domain->domain_flags &
+               (DOMF_shutdown|DOMF_shuttingdown|DOMF_ctrl_pause)) );
 }
 
 void vcpu_pause(struct vcpu *v);
