Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
* Ryan Harper <ryanh@xxxxxxxxxx> [2005-05-10 10:01]:
> * Mike Wray <mike.wray@xxxxxx> [2005-05-10 09:08]:
> > So it looks like we need to try again.
> > Apologies if this is because of changes after
> > you submitted the patch.
> >
> > I also had two failed hunks, which I fixed manually:
> >
> > 1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej
> > 1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej
>
> I'll update it against current unstable and resubmit.  Thanks.

Updated against 2005-05-10 nightly unstable snapshot.

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@xxxxxxxxxx

diffstat output:
 tools/libxc/xc.h                          |    6 ++
 tools/libxc/xc_domain.c                   |   16 +++++-
 tools/python/xen/lowlevel/xc/xc.c         |   71 ++++++++++++++++++------------
 tools/python/xen/xend/XendClient.py       |    5 +-
 tools/python/xen/xend/XendDomain.py       |   11 ++--
 tools/python/xen/xend/XendDomainInfo.py   |    6 ++
 tools/python/xen/xend/server/SrvDomain.py |    3 -
 tools/python/xen/xend/server/SrvUsbif.py  |    1
 tools/python/xen/xm/main.py               |   67 +++++++++++++++++++++++-----
 xen/arch/x86/domain.c                     |    1
 xen/common/dom0_ops.c                     |   38 +++++++++++++---
 xen/common/domain.c                       |    1
 xen/include/public/dom0_ops.h             |    7 ++
 xen/include/public/xen.h                  |    2
 xen/include/xen/sched.h                   |    4 +
 15 files changed, 179 insertions(+), 60 deletions(-)

Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
---
diff -urN a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	2005-05-09 22:11:33.000000000 -0500
+++ b/tools/libxc/xc_domain.c	2005-05-10 10:13:18.886383808 -0500
@@ -16,6 +16,8 @@
 {
     int err, errno_saved;
     dom0_op_t op;
+    u32 vcpu = 0; /* FIXME, hard coded initial pin to vcpu 0 */
+    cpumap_t cpumap = 1<<cpu;
 
     op.cmd = DOM0_CREATEDOMAIN;
     op.u.createdomain.domain = (domid_t)*pdomid;
@@ -25,7 +27,7 @@
     *pdomid = (u16)op.u.createdomain.domain;
 
     if ( (cpu != -1) &&
-         ((err = xc_domain_pincpu(xc_handle, *pdomid, cpu)) != 0) )
+         ((err = xc_domain_pincpu(xc_handle, *pdomid, vcpu, &cpumap)) != 0) )
         goto fail;
 
     if ( (err = xc_domain_setcpuweight(xc_handle, *pdomid, cpu_weight)) != 0 )
@@ -84,13 +86,14 @@
 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
-                     int cpu)
+                     int vcpu,
+                     cpumap_t *cpumap)
 {
     dom0_op_t op;
 
     op.cmd = DOM0_PINCPUDOMAIN;
     op.u.pincpudomain.domain = (domid_t)domid;
-    op.u.pincpudomain.exec_domain = 0;
-    op.u.pincpudomain.cpu = cpu;
+    op.u.pincpudomain.exec_domain = vcpu;
+    op.u.pincpudomain.cpumap = cpumap;
 
     return do_dom0_op(xc_handle, &op);
 }
@@ -133,6 +136,11 @@
         info->max_memkb = op.u.getdomaininfo.max_pages<<(PAGE_SHIFT);
         info->shared_info_frame = op.u.getdomaininfo.shared_info_frame;
         info->cpu_time = op.u.getdomaininfo.cpu_time;
+        info->vcpus = op.u.getdomaininfo.n_vcpu;
+        memcpy(info->vcpu_to_cpu, &op.u.getdomaininfo.vcpu_to_cpu,
+               MAX_VIRT_CPUS*sizeof(u32));
+        memcpy(info->cpumap, &op.u.getdomaininfo.cpumap,
+               MAX_VIRT_CPUS*sizeof(cpumap_t));
 
         next_domid = (u16)op.u.getdomaininfo.domain + 1;
         info++;
diff -urN a/tools/libxc/xc.h b/tools/libxc/xc.h
--- a/tools/libxc/xc.h	2005-05-09 22:11:29.000000000 -0500
+++ b/tools/libxc/xc.h	2005-05-10 10:13:18.895382440 -0500
@@ -111,6 +111,7 @@
 typedef struct {
     u32           domid;
     unsigned int  cpu;
+    unsigned int  vcpus;
     unsigned int  dying:1, crashed:1, shutdown:1,
                   paused:1, blocked:1, running:1;
     unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
@@ -118,6 +119,8 @@
     unsigned long shared_info_frame;
     u64           cpu_time;
     unsigned long max_memkb;
+    u32           vcpu_to_cpu[MAX_VIRT_CPUS];
+    cpumap_t      cpumap[MAX_VIRT_CPUS];
 } xc_dominfo_t;
 
 typedef dom0_getdomaininfo_t xc_domaininfo_t;
@@ -167,7 +170,8 @@
                          u32 domid);
 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
-                     int cpu);
+                     int vcpu,
+                     cpumap_t *cpumap);
 /**
  * This function will return information about one or more domains.
  *
diff -urN a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	2005-05-09 22:11:27.000000000 -0500
+++ b/tools/python/xen/lowlevel/xc/xc.c	2005-05-10 10:13:18.922378336 -0500
@@ -155,15 +155,16 @@
     XcObject *xc = (XcObject *)self;
 
     u32 dom;
-    int cpu = -1;
+    int vcpu = 0;
+    cpumap_t cpumap = 0xFFFFFFFF;
 
-    static char *kwd_list[] = { "dom", "cpu", NULL };
+    static char *kwd_list[] = { "dom", "vcpu", "cpumap", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
-                                      &dom, &cpu) )
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|ii", kwd_list,
+                                      &dom, &vcpu, &cpumap) )
         return NULL;
 
-    if ( xc_domain_pincpu(xc->xc_handle, dom, cpu) != 0 )
+    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, &cpumap) != 0 )
         return PyErr_SetFromErrno(xc_error);
 
     Py_INCREF(zero);
@@ -175,10 +176,10 @@
                                     PyObject *kwds)
 {
     XcObject *xc = (XcObject *)self;
-    PyObject *list;
+    PyObject *list, *vcpu_list, *cpumap_list, *info_dict;
 
     u32 first_dom = 0;
-    int max_doms = 1024, nr_doms, i;
+    int max_doms = 1024, nr_doms, i, j;
     xc_dominfo_t *info;
 
    static char *kwd_list[] = { "first_dom", "max_doms", NULL };
@@ -195,23 +196,34 @@
     list = PyList_New(nr_doms);
     for ( i = 0 ; i < nr_doms; i++ )
     {
-        PyList_SetItem(
-            list, i,
-            Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
-                          ",s:l,s:L,s:l,s:i}",
-                          "dom", info[i].domid,
-                          "cpu", info[i].cpu,
-                          "dying", info[i].dying,
-                          "crashed", info[i].crashed,
-                          "shutdown", info[i].shutdown,
-                          "paused", info[i].paused,
-                          "blocked", info[i].blocked,
-                          "running", info[i].running,
-                          "mem_kb", info[i].nr_pages*4,
-                          "cpu_time", info[i].cpu_time,
-                          "maxmem_kb", info[i].max_memkb,
-                          "shutdown_reason", info[i].shutdown_reason
-                ));
+        vcpu_list = PyList_New(MAX_VIRT_CPUS);
+        cpumap_list = PyList_New(MAX_VIRT_CPUS);
+        for ( j = 0; j < MAX_VIRT_CPUS; j++ ) {
+            PyList_SetItem( vcpu_list, j,
+                            Py_BuildValue("i", info[i].vcpu_to_cpu[j]));
+            PyList_SetItem( cpumap_list, j,
+                            Py_BuildValue("i", info[i].cpumap[j]));
+        }
+
+        info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
+                                  ",s:l,s:L,s:l,s:i}",
+                                  "dom", info[i].domid,
+                                  "cpu", info[i].cpu,
+                                  "vcpus", info[i].vcpus,
+                                  "dying", info[i].dying,
+                                  "crashed", info[i].crashed,
+                                  "shutdown", info[i].shutdown,
+                                  "paused", info[i].paused,
+                                  "blocked", info[i].blocked,
+                                  "running", info[i].running,
+                                  "mem_kb", info[i].nr_pages*4,
+                                  "cpu_time", info[i].cpu_time,
+                                  "maxmem_kb", info[i].max_memkb,
+                                  "shutdown_reason", info[i].shutdown_reason);
+        PyDict_SetItemString( info_dict, "vcpu_to_cpu", vcpu_list );
+        PyDict_SetItemString( info_dict, "cpumap", cpumap_list );
+        PyList_SetItem( list, i, info_dict);
+    }
 
     free(info);
@@ -959,9 +971,10 @@
     { "domain_pincpu",
       (PyCFunction)pyxc_domain_pincpu,
       METH_VARARGS | METH_KEYWORDS, "\n"
-      "Pin a domain to a specified CPU.\n"
-      " dom [int]:     Identifier of domain to be pinned.\n"
-      " cpu [int, -1]: CPU to pin to, or -1 to unpin\n\n"
+      "Pin a VCPU to a specified set of CPUs.\n"
+      " dom [int]:       Identifier of domain to which VCPU belongs.\n"
+      " vcpu [int, 0]:   VCPU being pinned.\n"
+      " cpumap [int, -1]: Bitmap of usable CPUs.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },

     { "domain_getinfo",
       (PyCFunction)pyxc_domain_getinfo,
@@ -976,6 +989,7 @@
       " domain-id space was reached.\n"
       " dom      [int]:  Identifier of domain to which this info pertains\n"
       " cpu      [int]:  CPU to which this domain is bound\n"
+      " vcpus    [int]:  Number of Virtual CPUS in this domain\n"
       " dying    [int]:  Bool - is the domain dying?\n"
       " crashed  [int]:  Bool - has the domain crashed?\n"
       " shutdown [int]:  Bool - has the domain shut itself down?\n"
@@ -986,7 +1000,8 @@
       " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
       " cpu_time [long]: CPU time consumed, in nanoseconds\n"
       " shutdown_reason [int]: Numeric code from guest OS, explaining "
-      "reason why it shut itself down.\n" },
+      "reason why it shut itself down.\n"
+      " vcpu_to_cpu [[int]]: List that maps VCPUS to CPUS\n" },

     { "linux_save",
       (PyCFunction)pyxc_linux_save,
diff -urN a/tools/python/xen/xend/server/SrvDomain.py b/tools/python/xen/xend/server/SrvDomain.py
--- a/tools/python/xen/xend/server/SrvDomain.py	2005-05-09 22:11:31.000000000 -0500
+++ b/tools/python/xen/xend/server/SrvDomain.py	2005-05-10 10:13:18.931376968 -0500
@@ -92,7 +92,8 @@
 
     def op_pincpu(self, op, req):
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'str'],
-                     ['cpu', 'int']])
+                     ['vcpu', 'int'],
+                     ['cpumap', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
diff -urN a/tools/python/xen/xend/server/SrvUsbif.py b/tools/python/xen/xend/server/SrvUsbif.py
--- a/tools/python/xen/xend/server/SrvUsbif.py	2005-05-09 22:11:36.000000000 -0500
+++ b/tools/python/xen/xend/server/SrvUsbif.py	2005-05-10 10:13:18.932376816 -0500
@@ -107,6 +107,7 @@
     def op_pincpu(self, op, req):
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'str'],
+                     ['vcpu', 'int'],
                      ['cpu', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
diff -urN a/tools/python/xen/xend/XendClient.py b/tools/python/xen/xend/XendClient.py
--- a/tools/python/xen/xend/XendClient.py	2005-05-09 22:11:25.000000000 -0500
+++ b/tools/python/xen/xend/XendClient.py	2005-05-10 10:13:18.947374536 -0500
@@ -246,10 +246,11 @@
                               'live'     : live,
                               'resource' : resource })
 
-    def xend_domain_pincpu(self, id, cpu):
+    def xend_domain_pincpu(self, id, vcpu, cpumap):
         return self.xendPost(self.domainurl(id),
                              {'op'     : 'pincpu',
-                              'cpu'    : cpu })
+                              'vcpu'   : vcpu,
+                              'cpumap' : cpumap })
 
     def xend_domain_cpu_bvt_set(self, id, mcuadv, warpback, warpvalue, warpl, warpu):
         return self.xendPost(self.domainurl(id),
diff -urN a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py	2005-05-09 22:11:27.000000000 -0500
+++ b/tools/python/xen/xend/XendDomainInfo.py	2005-05-10 10:13:18.958372864 -0500
@@ -372,6 +372,10 @@
             sxpr.append(['shutdown_reason', reason])
         sxpr.append(['cpu', self.info['cpu']])
         sxpr.append(['cpu_time', self.info['cpu_time']/1e9])
+        sxpr.append(['vcpus', self.info['vcpus']])
+        sxpr.append(['cpumap', self.info['cpumap']])
+        sxpr.append(['vcpu_to_cpu', ''.join(map(lambda x: str(x),
+                    self.info['vcpu_to_cpu'][0:self.info['vcpus']]))])
 
         if self.start_time:
             up_time = time.time() - self.start_time
@@ -449,7 +453,7 @@
             raise VmError('missing memory size')
         cpu = sxp.child_value(config, 'cpu')
         if self.recreate and self.dom and cpu is not None:
-            xc.domain_pincpu(self.dom, int(cpu))
+            xc.domain_pincpu(self.dom, 0, 1<<int(cpu))
         try:
             image = sxp.child_value(self.config, 'image')
             self.vcpus = int(sxp.child_value(image, 'vcpus'))
diff -urN a/tools/python/xen/xend/XendDomain.py b/tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py	2005-05-09 22:11:33.000000000 -0500
+++ b/tools/python/xen/xend/XendDomain.py	2005-05-10 10:13:18.960372560 -0500
@@ -612,15 +612,16 @@
         xmigrate = XendMigrate.instance()
         return xmigrate.save_begin(dominfo, dst)
 
-    def domain_pincpu(self, id, cpu):
-        """Pin a domain to a cpu.
+    def domain_pincpu(self, id, vcpu, cpumap):
+        """Set which cpus vcpu can use
 
-        @param id: domain
-        @param cpu: cpu number
+        @param id:     domain
+        @param vcpu:   vcpu number
+        @param cpumap: bitmap of usable cpus
         """
         dominfo = self.domain_lookup(id)
         try:
-            return xc.domain_pincpu(int(dominfo.id), cpu)
+            return xc.domain_pincpu(int(dominfo.id), vcpu, cpumap)
         except Exception, ex:
             raise XendError(str(ex))
diff -urN a/tools/python/xen/xm/main.py b/tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py	2005-05-09 22:11:32.000000000 -0500
+++ b/tools/python/xen/xm/main.py	2005-05-10 10:13:18.970371040 -0500
@@ -6,6 +6,8 @@
 import sys
 from getopt import getopt
 import socket
+import warnings
+warnings.filterwarnings('ignore', category=FutureWarning)
 
 from xen.xend import PrettyPrint
 from xen.xend import sxp
@@ -340,8 +342,8 @@
     name = "list"
     info = """List information about domains."""
 
-    short_options = 'l'
-    long_options = ['long']
+    short_options = 'lv'
+    long_options = ['long','vcpus']
 
     def help(self, args):
         if help:
@@ -350,11 +352,13 @@
         Either all domains or the domains given.
 
         -l, --long   Get more detailed information.
+        -v, --vcpus  Show VCPU to CPU mapping.
         """
         return
 
     def main(self, args):
         use_long = 0
+        show_vcpus = 0
         (options, params) = getopt(args[1:],
                                    self.short_options,
                                    self.long_options)
@@ -362,6 +366,8 @@
         for (k, v) in options:
             if k in ['-l', '--long']:
                 use_long = 1
+            if k in ['-v', '--vcpus']:
+                show_vcpus = 1
 
         if n == 0:
             doms = server.xend_domains()
@@ -371,11 +377,13 @@
 
         if use_long:
             self.long_list(doms)
+        elif show_vcpus:
+            self.show_vcpus(doms)
         else:
             self.brief_list(doms)
 
     def brief_list(self, doms):
-        print 'Name              Id  Mem(MB)  CPU  State  Time(s)  Console'
+        print 'Name              Id  Mem(MB)  CPU  VCPU(s)  State  Time(s)  Console'
         for dom in doms:
             info = server.xend_domain(dom)
             d = {}
@@ -383,6 +391,7 @@
             d['name'] = sxp.child_value(info, 'name', '??')
             d['mem'] = int(sxp.child_value(info, 'memory', '0'))
             d['cpu'] = int(sxp.child_value(info, 'cpu', '0'))
+            d['vcpus'] = int(sxp.child_value(info, 'vcpus', '0'))
             d['state'] = sxp.child_value(info, 'state', '??')
             d['cpu_time'] = float(sxp.child_value(info, 'cpu_time', '0'))
             console = sxp.child(info, 'console')
@@ -390,9 +399,27 @@
                 d['port'] = sxp.child_value(console, 'console_port')
             else:
                 d['port'] = ''
-            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(state)5s %(cpu_time)7.1f %(port)4s"
+            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(vcpus)5d %(state)5s %(cpu_time)7.1f %(port)4s"
                    % d)
 
+    def show_vcpus(self, doms):
+        print 'Name              Id  VCPU  CPU  CPUMAP'
+        for dom in doms:
+            info = server.xend_domain(dom)
+            vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', '?').replace('-','')
+            cpumap = sxp.child_value(info, 'cpumap', [])
+            mask = ((int(sxp.child_value(info, 'vcpus', '0')))**2) - 1
+            count = 0
+            for cpu in vcpu_to_cpu:
+                d = {}
+                d['name'] = sxp.child_value(info, 'name', '??')
+                d['dom'] = int(sxp.child_value(info, 'id', '-1'))
+                d['vcpu'] = int(count)
+                d['cpu'] = int(cpu)
+                d['cpumap'] = int(cpumap[count])&mask
+                count = count + 1
+                print ("%(name)-16s %(dom)3d %(vcpu)4d %(cpu)3d 0x%(cpumap)x" % d)
+
     def long_list(self, doms):
         for dom in doms:
             info = server.xend_domain(dom)
@@ -474,17 +501,35 @@
 class ProgPincpu(Prog):
     group = 'domain'
     name = "pincpu"
-    info = """Pin a domain to a cpu. """
+    info = """Set which cpus a VCPU can use. """
 
     def help(self, args):
-        print args[0],'DOM CPU'
-        print '\nPin domain DOM to cpu CPU.'
+        print args[0],'DOM VCPU CPUS'
+        print '\nSet which cpus VCPU in domain DOM can use.'
+
+    # convert list of cpus to bitmap integer value
+    def make_map(self, cpulist):
+        cpus = []
+        cpumap = 0
+        for c in cpulist.split(','):
+            if len(c) > 1:
+                (x,y) = c.split('-')
+                for i in range(int(x),int(y)+1):
+                    cpus.append(int(i))
+            else:
+                cpus.append(int(c))
+        cpus.sort()
+        for c in cpus:
+            cpumap = cpumap | 1<<c
+
+        return cpumap
 
     def main(self, args):
-        if len(args) != 3: self.err("%s: Invalid argument(s)" % args[0])
-        dom = args[1]
-        cpu = int(args[2])
-        server.xend_domain_pincpu(dom, cpu)
+        if len(args) != 4: self.err("%s: Invalid argument(s)" % args[0])
+        dom = args[1]
+        vcpu = int(args[2])
+        cpumap = self.make_map(args[3]);
+        server.xend_domain_pincpu(dom, vcpu, cpumap)
 
 xm.prog(ProgPincpu)
diff -urN a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c	2005-05-09 22:11:29.000000000 -0500
+++ b/xen/arch/x86/domain.c	2005-05-10 10:14:32.764152680 -0500
@@ -253,6 +253,7 @@
     d->shared_info = (void *)alloc_xenheap_page();
     memset(d->shared_info, 0, PAGE_SIZE);
     ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
+    ed->cpumap = CPUMAP_RUNANYWHERE;
     SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
     machine_to_phys_mapping[virt_to_phys(d->shared_info) >> PAGE_SHIFT] =
         INVALID_M2P_ENTRY;
diff -urN a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c	2005-05-09 22:11:33.000000000 -0500
+++ b/xen/common/dom0_ops.c	2005-05-10 10:32:11.615182856 -0500
@@ -221,7 +221,8 @@
         domid_t dom = op->u.pincpudomain.domain;
         struct domain *d = find_domain_by_id(dom);
         struct exec_domain *ed;
-        int cpu = op->u.pincpudomain.cpu;
+        cpumap_t curmap, *cpumap = &curmap;
+
         if ( d == NULL )
         {
@@ -229,6 +230,14 @@
             break;
         }
 
+        if ( (op->u.pincpudomain.exec_domain >= MAX_VIRT_CPUS) ||
+             !d->exec_domain[op->u.pincpudomain.exec_domain] )
+        {
+            ret = -EINVAL;
+            put_domain(d);
+            break;
+        }
+
         ed = d->exec_domain[op->u.pincpudomain.exec_domain];
         if ( ed == NULL )
         {
@@ -244,17 +253,29 @@
             break;
         }
 
-        if ( cpu == -1 )
+        if ( copy_from_user(cpumap,
+                            op->u.pincpudomain.cpumap, sizeof(*cpumap)) )
         {
-            clear_bit(EDF_CPUPINNED, &ed->flags);
+            ret = -EFAULT;
+            put_domain(d);
+            break;
         }
+
+        /* update cpumap for this ed */
+        ed->cpumap = *(cpumap);
+
+        if ( *(cpumap) == CPUMAP_RUNANYWHERE )
+            clear_bit(EDF_CPUPINNED, &ed->flags);
         else
         {
+            /* pick a new cpu from the usable map */
+            int new_cpu = (int)find_first_set_bit(*(cpumap)) % smp_num_cpus;
+
             exec_domain_pause(ed);
-            if ( ed->processor != (cpu % smp_num_cpus) )
+            if ( ed->processor != new_cpu )
                 set_bit(EDF_MIGRATED, &ed->flags);
             set_bit(EDF_CPUPINNED, &ed->flags);
-            ed->processor = cpu % smp_num_cpus;
+            ed->processor = new_cpu;
             exec_domain_unpause(ed);
         }
@@ -308,6 +329,12 @@
             break;
         }
 
+        memset(&op->u.getdomaininfo.vcpu_to_cpu,-1,MAX_VIRT_CPUS*sizeof(u8));
+        for_each_exec_domain ( d, ed ) {
+            op->u.getdomaininfo.vcpu_to_cpu[ed->id] = ed->processor;
+            op->u.getdomaininfo.cpumap[ed->id] = ed->cpumap;
+        }
+
         ed = d->exec_domain[op->u.getdomaininfo.exec_domain];
 
         op->u.getdomaininfo.flags =
@@ -325,6 +352,7 @@
         op->u.getdomaininfo.tot_pages   = d->tot_pages;
         op->u.getdomaininfo.max_pages   = d->max_pages;
         op->u.getdomaininfo.cpu_time    = ed->cpu_time;
+        op->u.getdomaininfo.n_vcpu      = d->shared_info->n_vcpu;
         op->u.getdomaininfo.shared_info_frame =
             __pa(d->shared_info) >> PAGE_SHIFT;
diff -urN a/xen/common/domain.c b/xen/common/domain.c
--- a/xen/common/domain.c	2005-05-09 22:11:29.000000000 -0500
+++ b/xen/common/domain.c	2005-05-10 10:13:19.022363136 -0500
@@ -283,6 +283,7 @@
 
     ed = d->exec_domain[vcpu];
     atomic_set(&ed->pausecnt, 0);
+    ed->cpumap = CPUMAP_RUNANYWHERE;
 
     memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
 
diff -urN a/xen/include/public/dom0_ops.h b/xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h	2005-05-09 22:11:34.000000000 -0500
+++ b/xen/include/public/dom0_ops.h	2005-05-10 10:13:19.031361768 -0500
@@ -88,6 +88,9 @@
     memory_t max_pages;
     memory_t shared_info_frame;       /* MFN of shared_info struct */
     u64      cpu_time;
+    u32      n_vcpu;
+    u32      vcpu_to_cpu[MAX_VIRT_CPUS];  /* current mapping   */
+    cpumap_t cpumap[MAX_VIRT_CPUS];       /* allowable mapping */
 } dom0_getdomaininfo_t;
 
 #define DOM0_SETDOMAININFO      13
@@ -170,14 +173,14 @@
 } dom0_readconsole_t;
 
 /*
- * Pin Domain to a particular CPU  (use -1 to unpin)
+ * Set which cpus an exec_domain can use
  */
 #define DOM0_PINCPUDOMAIN     20
 typedef struct {
     /* IN variables. */
     domid_t      domain;
     u16          exec_domain;
-    s32          cpu;             /* -1 implies unpin */
+    cpumap_t     *cpumap;
 } dom0_pincpudomain_t;
 
 /* Get trace buffers machine base address */
diff -urN a/xen/include/public/xen.h b/xen/include/public/xen.h
--- a/xen/include/public/xen.h	2005-05-09 22:11:28.000000000 -0500
+++ b/xen/include/public/xen.h	2005-05-10 10:13:19.041360248 -0500
@@ -473,6 +473,8 @@
 /* For use in guest OSes. */
 extern shared_info_t *HYPERVISOR_shared_info;
 
+typedef u64 cpumap_t;
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __XEN_PUBLIC_XEN_H__ */
diff -urN a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h	2005-05-09 22:11:27.000000000 -0500
+++ b/xen/include/xen/sched.h	2005-05-10 10:13:19.064356752 -0500
@@ -58,6 +58,8 @@
 void destroy_event_channels(struct domain *d);
 int  init_exec_domain_event_channels(struct exec_domain *ed);
+
+#define CPUMAP_RUNANYWHERE 0xFFFFFFFF
 
 struct exec_domain
 {
     int              id;
@@ -84,6 +86,8 @@
 
     atomic_t         pausecnt;
 
+    cpumap_t         cpumap;        /* which cpus this domain can run on */
+
     struct arch_exec_domain arch;
 };

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
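For anyone who wants to try the new interface: `xm pincpu DOM VCPU CPUS` turns the CPUS
list into a bitmap and passes it down through xend to xc.domain_pincpu(). Below is a
minimal standalone sketch of that conversion, mirroring make_map() in the patch; the
helper name and example values are illustrative only and are not part of the patch.

    # Illustration only -- mirrors ProgPincpu.make_map() from the patch above.
    # Turns a cpu list string such as "0,2-3" into the cpumap bitmap that the
    # new xend_domain_pincpu()/xc.domain_pincpu() calls expect.
    def cpulist_to_cpumap(cpulist):
        cpumap = 0
        for c in cpulist.split(','):
            if '-' in c:
                (lo, hi) = c.split('-')
                for i in range(int(lo), int(hi) + 1):
                    cpumap = cpumap | 1 << i
            else:
                cpumap = cpumap | 1 << int(c)
        return cpumap

    print hex(cpulist_to_cpumap("0,2-3"))   # bits 0, 2 and 3 set -> 0xd
    # VCPU 0 of a domain would then be restricted with something like:
    #   server.xend_domain_pincpu(dom, 0, cpulist_to_cpumap("0,2-3"))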