[Xen-changelog] [xen-unstable] tools: Obtain platform capabilities via physinfo sysctl.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1207069459 -3600
# Node ID 250606290439708fdcd745270e04aa2f38839403
# Parent  720552439f74fffe419339d8c30172522b8e4395
tools: Obtain platform capabilities via physinfo sysctl.
Signed-off-by: Jean Guyader <jean.guyader@xxxxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 tools/python/xen/lowlevel/xc/xc.c |   28 +++++++++++++++++-----------
 tools/python/xen/xend/XendNode.py |    3 +++
 xen/arch/ia64/xen/dom0_ops.c      |    2 +-
 xen/arch/powerpc/sysctl.c         |    6 ++----
 xen/arch/x86/sysctl.c             |   14 +++++++++-----
 xen/include/public/sysctl.h      |   11 +++++++++--
 6 files changed, 41 insertions(+), 23 deletions(-)

diff -r 720552439f74 -r 250606290439 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Tue Apr 01 17:24:22 2008 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Apr 01 18:04:19 2008 +0100
@@ -762,11 +762,12 @@ static PyObject *pyxc_physinfo(XcObject
 {
 #define MAX_CPU_ID 255
     xc_physinfo_t info;
-    char cpu_cap[128], *p=cpu_cap, *q=cpu_cap;
+    char cpu_cap[128], virt_caps[128], *p;
     int i, j, max_cpu_id;
     uint64_t free_heap;
     PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj;
     xc_cpu_to_node_t map[MAX_CPU_ID + 1];
+    const char *virtcap_names[] = { "hvm", "hvm_directio" };
 
     set_xen_guest_handle(info.cpu_to_node, map);
     info.max_cpu_id = MAX_CPU_ID;
@@ -774,17 +775,21 @@ static PyObject *pyxc_physinfo(XcObject
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
         return pyxc_error_to_exception();
 
-    *q = 0;
+    p = cpu_cap;
+    *p = '\0';
     for ( i = 0; i < sizeof(info.hw_cap)/4; i++ )
-    {
         p += sprintf(p, "%08x:", info.hw_cap[i]);
-        if ( info.hw_cap[i] )
-            q = p;
-    }
-    if ( q > cpu_cap )
-        *(q-1) = 0;
-
-    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
+    *(p-1) = 0;
+
+    p = virt_caps;
+    *p = '\0';
+    for ( i = 0; i < 2; i++ )
+        if ( (info.capabilities >> i) & 1 )
+            p += sprintf(p, "%s ", virtcap_names[i]);
+    if ( p != virt_caps )
+        *(p-1) = '\0';
+
+    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s:s:s}",
                             "nr_nodes",         info.nr_nodes,
                             "max_cpu_id",       info.max_cpu_id,
                             "threads_per_core", info.threads_per_core,
@@ -794,7 +799,8 @@ static PyObject *pyxc_physinfo(XcObject
                             "free_memory",      pages_to_kib(info.free_pages),
                             "scrub_memory",     pages_to_kib(info.scrub_pages),
                             "cpu_khz",          info.cpu_khz,
-                            "hw_caps",          cpu_cap);
+                            "hw_caps",          cpu_cap,
+                            "virt_caps",        virt_caps);
 
     max_cpu_id = info.max_cpu_id;
     if ( max_cpu_id > MAX_CPU_ID )
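A note on the Py_BuildValue format above: it ends in "s:s:s:s" rather than "s:s,s:s", which is harmless because space, tab, colon and comma are all ignored as separators in Py_BuildValue format strings. For context (not part of the changeset), here is a minimal sketch of how a standalone C tool could read the new capabilities word through the same libxc call this binding wraps. It assumes the int-handle xc_interface_open()/xc_physinfo() API of this libxc generation and borrows the cpu_to_node map setup from pyxc_physinfo() above:

    #include <stdio.h>
    #include <string.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_physinfo_t info;
        xc_cpu_to_node_t map[255 + 1];  /* as pyxc_physinfo's MAX_CPU_ID map */
        int xc;

        memset(&info, 0, sizeof(info));
        set_xen_guest_handle(info.cpu_to_node, map);
        info.max_cpu_id = 255;

        xc = xc_interface_open();       /* int handle in this libxc era */
        if ( xc < 0 )
            return 1;

        if ( xc_physinfo(xc, &info) == 0 )  /* issues XEN_SYSCTL_physinfo */
        {
            if ( info.capabilities & XEN_SYSCTL_PHYSCAP_hvm )
                printf("hvm\n");
            if ( info.capabilities & XEN_SYSCTL_PHYSCAP_hvm_directio )
                printf("hvm_directio\n");
        }

        xc_interface_close(xc);
        return 0;
    }

xc_physinfo() is a thin wrapper around the XEN_SYSCTL_physinfo hypercall, so the capabilities field arrives exactly as the arch handlers below fill it in.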
diff -r 720552439f74 -r 250606290439 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Tue Apr 01 17:24:22 2008 +0100
+++ b/tools/python/xen/xend/XendNode.py Tue Apr 01 18:04:19 2008 +0100
@@ -92,6 +92,7 @@ class XendNode:
         physinfo = self.physinfo_dict()
         cpu_count = physinfo['nr_cpus']
         cpu_features = physinfo['hw_caps']
+        virt_caps = physinfo['virt_caps']
 
         # If the number of CPUs don't match, we should just reinitialise
         # the CPU UUIDs.
@@ -112,6 +113,7 @@ class XendNode:
             self.cpus[u].update(
                 { 'host'     : self.uuid,
                   'features' : cpu_features,
+                  'virt_caps': virt_caps,
                   'speed'    : int(float(cpuinfo[number]['cpu MHz'])),
                   'vendor'   : cpuinfo[number]['vendor_id'],
                   'modelname': cpuinfo[number]['model name'],
@@ -605,6 +607,7 @@ class XendNode:
                     'threads_per_core',
                     'cpu_mhz',
                     'hw_caps',
+                    'virt_caps',
                     'total_memory',
                     'free_memory',
                     'node_to_cpu',

diff -r 720552439f74 -r 250606290439 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Tue Apr 01 17:24:22 2008 +0100
+++ b/xen/arch/ia64/xen/dom0_ops.c      Tue Apr 01 18:04:19 2008 +0100
@@ -410,6 +410,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
 
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
 
+        memset(pi, 0, sizeof(*pi));
         pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
@@ -419,7 +420,6 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
         pi->free_pages = avail_domheap_pages();
         pi->scrub_pages = avail_scrub_pages();
         pi->cpu_khz = local_cpu_data->proc_freq / 1000;
-        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
 
         max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);

diff -r 720552439f74 -r 250606290439 xen/arch/powerpc/sysctl.c
--- a/xen/arch/powerpc/sysctl.c Tue Apr 01 17:24:22 2008 +0100
+++ b/xen/arch/powerpc/sysctl.c Tue Apr 01 18:04:19 2008 +0100
@@ -41,6 +41,7 @@ long arch_do_sysctl(struct xen_sysctl *s
     {
         xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
 
+        memset(pi, 0, sizeof(*pi));
         pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
@@ -50,10 +51,7 @@ long arch_do_sysctl(struct xen_sysctl *s
         pi->total_pages = total_pages;
         pi->free_pages = avail_domheap_pages();
         pi->cpu_khz = cpu_khz;
-        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
-        ret = 0;
-        if ( copy_to_guest(u_sysctl, sysctl, 1) )
-            ret = -EFAULT;
+        ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0;
 
     }
     break;

diff -r 720552439f74 -r 250606290439 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c     Tue Apr 01 17:24:22 2008 +0100
+++ b/xen/arch/x86/sysctl.c     Tue Apr 01 18:04:19 2008 +0100
@@ -47,18 +47,22 @@ long arch_do_sysctl(
         if ( ret )
             break;
 
+        memset(pi, 0, sizeof(*pi));
         pi->threads_per_core =
             cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
         pi->nr_cpus = (u32)num_online_cpus();
         pi->nr_nodes = num_online_nodes();
-        pi->total_pages = total_pages;
-        pi->free_pages = avail_domheap_pages();
-        pi->scrub_pages = avail_scrub_pages();
-        pi->cpu_khz = cpu_khz;
-        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
+        pi->total_pages      = total_pages;
+        pi->free_pages       = avail_domheap_pages();
+        pi->scrub_pages      = avail_scrub_pages();
+        pi->cpu_khz          = cpu_khz;
         memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
+        if ( hvm_enabled )
+            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
+        if ( iommu_enabled )
+            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
 
         max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
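Two remarks before the header change below. The memset(pi, 0, sizeof(*pi)) now performed at the top of every arch handler means fields an architecture never touches -- in particular the new capabilities word on ia64 and powerpc -- are reported as zero rather than as uninitialised stack contents. And the flag pair added to sysctl.h follows the usual Xen public-header idiom of a bit number (leading underscore) plus a shifted mask. A self-contained sketch of that idiom, with a deliberately hypothetical third bit (PHYSCAP_foo is not a real flag):

    #include <stdio.h>
    #include <stdint.h>

    /* The two real bits from this patch, replicated so the example
     * stands alone, plus one hypothetical successor. */
    #define _PHYSCAP_hvm          0
    #define PHYSCAP_hvm          (1u<<_PHYSCAP_hvm)
    #define _PHYSCAP_hvm_directio 1
    #define PHYSCAP_hvm_directio (1u<<_PHYSCAP_hvm_directio)
    #define _PHYSCAP_foo          2   /* hypothetical next capability */
    #define PHYSCAP_foo          (1u<<_PHYSCAP_foo)

    int main(void)
    {
        uint32_t capabilities = 0;   /* zeroed, as the arch handlers do */

        /* Hypervisor side: OR in only the bits the platform supports. */
        capabilities |= PHYSCAP_hvm;
        capabilities |= PHYSCAP_hvm_directio;

        /* Tool side: test individual bits of the returned word. */
        printf("foo %ssupported\n",
               (capabilities & PHYSCAP_foo) ? "" : "not ");
        return 0;
    }

Keeping the bit number and the mask as separate macros lets callers use either form: the Python binding above shifts by bit number ((info.capabilities >> i) & 1), while the x86 handler ORs in masks.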
diff -r 720552439f74 -r 250606290439 xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h       Tue Apr 01 17:24:22 2008 +0100
+++ b/xen/include/public/sysctl.h       Tue Apr 01 18:04:19 2008 +0100
@@ -84,8 +84,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_
  * Get physical information about the host machine
  */
 #define XEN_SYSCTL_physinfo          3
+ /* (x86) The platform supports HVM guests. */
+#define _XEN_SYSCTL_PHYSCAP_hvm          0
+#define XEN_SYSCTL_PHYSCAP_hvm          (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
+ /* (x86) The platform supports HVM-guest direct access to I/O devices. */
+#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
+#define XEN_SYSCTL_PHYSCAP_hvm_directio  (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
 struct xen_sysctl_physinfo {
-    /* IN variables. */
     uint32_t threads_per_core;
     uint32_t cores_per_socket;
     uint32_t nr_cpus;
@@ -96,7 +101,6 @@ struct xen_sysctl_physinfo {
     uint64_aligned_t scrub_pages;
     uint32_t hw_cap[8];
 
-    /* IN/OUT variables. */
     /*
      * IN: maximum addressable entry in the caller-provided cpu_to_node array.
      * OUT: largest cpu identifier in the system.
@@ -112,6 +116,9 @@ struct xen_sysctl_physinfo {
      * elements of the array will not be written by the sysctl.
      */
     XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
+
+    /* XEN_SYSCTL_PHYSCAP_??? */
+    uint32_t capabilities;
 };
 typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
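The 'virt_caps' string that reaches Python is nothing more than the capabilities word expanded against the virtcap_names table. As a reference point (not part of the changeset), a self-contained restatement of the decoding loop from pyxc_physinfo():

    #include <stdio.h>
    #include <stdint.h>

    /* Expand a physinfo capabilities word into the space-separated
     * "hvm hvm_directio" form used by the Python binding. */
    static void virt_caps_string(uint32_t caps, char *buf)
    {
        static const char *names[] = { "hvm", "hvm_directio" };
        char *p = buf;
        unsigned int i;

        *p = '\0';
        for ( i = 0; i < sizeof(names)/sizeof(names[0]); i++ )
            if ( (caps >> i) & 1 )
                p += sprintf(p, "%s ", names[i]);
        if ( p != buf )
            *(p-1) = '\0';            /* strip the trailing space */
    }

    int main(void)
    {
        char buf[32];
        virt_caps_string(3u, buf);    /* bits 0 and 1 both set */
        printf("\"%s\"\n", buf);      /* prints "hvm hvm_directio" */
        return 0;
    }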