
[Xen-changelog] [xen-unstable] Fix XEN_SYSCTL_physinfo to handle NUMA info properly.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1210755145 -3600
# Node ID 547d10d2d38473e84fb47e8bbcde96b1dfe793cc
# Parent  fa8cb2a8ed52b164177583923c9c6a942d81b812
Fix XEN_SYSCTL_physinfo to handle NUMA info properly.
Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/ia64/xen/dom0_ops.c |   10 +++++++---
 xen/arch/x86/sysctl.c        |   19 ++++++++++++++-----
 2 files changed, 21 insertions(+), 8 deletions(-)

diff -r fa8cb2a8ed52 -r 547d10d2d384 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Wed May 14 09:31:44 2008 +0100
+++ b/xen/arch/ia64/xen/dom0_ops.c      Wed May 14 09:52:25 2008 +0100
@@ -407,10 +407,15 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
     {
         int i;
         uint32_t max_array_ent;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
 
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
 
+        max_array_ent = pi->max_cpu_id;
+        cpu_to_node_arr = pi->cpu_to_node;
+
         memset(pi, 0, sizeof(*pi));
+        pi->cpu_to_node = cpu_to_node_arr;
         pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
@@ -421,16 +426,15 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
         pi->scrub_pages      = avail_scrub_pages();
         pi->cpu_khz          = local_cpu_data->proc_freq / 1000;
 
-        max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
         max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
 
         ret = 0;
 
-        if (!guest_handle_is_null(pi->cpu_to_node)) {
+        if (!guest_handle_is_null(cpu_to_node_arr)) {
             for (i = 0; i <= max_array_ent; i++) {
                 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if (copy_to_guest_offset(pi->cpu_to_node, i, &node, 1)) {
+                if (copy_to_guest_offset(cpu_to_node_arr, i, &node, 1)) {
                     ret = -EFAULT;
                     break;
                 }
diff -r fa8cb2a8ed52 -r 547d10d2d384 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c     Wed May 14 09:31:44 2008 +0100
+++ b/xen/arch/x86/sysctl.c     Wed May 14 09:52:25 2008 +0100
@@ -40,6 +40,7 @@ long arch_do_sysctl(
     case XEN_SYSCTL_physinfo:
     {
         uint32_t i, max_array_ent;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
 
         xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
 
@@ -47,7 +48,11 @@ long arch_do_sysctl(
         if ( ret )
             break;
 
+        max_array_ent = pi->max_cpu_id;
+        cpu_to_node_arr = pi->cpu_to_node;
+
         memset(pi, 0, sizeof(*pi));
+        pi->cpu_to_node = cpu_to_node_arr;
         pi->threads_per_core =
             cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
@@ -64,22 +69,26 @@ long arch_do_sysctl(
         if ( iommu_enabled )
             pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
 
-        max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
         max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
 
-        ret = -EFAULT;
-        if ( !guest_handle_is_null(pi->cpu_to_node) )
+        ret = 0;
+
+        if ( !guest_handle_is_null(cpu_to_node_arr) )
         {
             for ( i = 0; i <= max_array_ent; i++ )
             {
                 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if ( copy_to_guest_offset(pi->cpu_to_node, i, &node, 1) )
+                if ( copy_to_guest_offset(cpu_to_node_arr, i, &node, 1) )
+                {
+                    ret = -EFAULT;
                     break;
+                }
             }
         }
 
-        ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0;
+        if ( copy_to_guest(u_sysctl, sysctl, 1) )
+            ret = -EFAULT;
     }
     break;
     

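For reference, the two fields being saved across the memset() are inputs supplied by the caller: max_cpu_id says how many entries the caller's array can hold, and cpu_to_node is the guest handle that array is returned through. The unpatched handlers read and tested them only after memset() had already zeroed them, so the handle was always seen as null and the per-CPU node map was never copied out. On x86 the error handling is also tightened so that a fault while filling the array is no longer masked by the final copy_to_guest() of the sysctl structure. Below is a rough caller-side sketch of how a toolstack asks for the map; the helper names used (xc_interface_open, xc_physinfo, set_xen_guest_handle) are assumptions based on the libxc interface of this era and are not part of this patch.

/*
 * Caller-side sketch (not part of this patch): requesting the per-CPU
 * NUMA node map via XEN_SYSCTL_physinfo through libxc.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <xenctrl.h>

#define NR_MAP_ENTRIES 64            /* caller-chosen array size */

int main(void)
{
    xc_physinfo_t info;
    uint32_t cpu_to_node[NR_MAP_ENTRIES];
    unsigned int i;
    int xc = xc_interface_open();

    if ( xc < 0 )
        return 1;

    memset(&info, 0, sizeof(info));

    /*
     * Inputs to the sysctl: the highest CPU index the array can hold and
     * the guest handle the map is returned through.  These are the two
     * fields the patch preserves across the hypervisor's memset().
     */
    info.max_cpu_id = NR_MAP_ENTRIES - 1;
    set_xen_guest_handle(info.cpu_to_node, cpu_to_node);

    if ( xc_physinfo(xc, &info) != 0 )
    {
        xc_interface_close(xc);
        return 1;
    }

    /* On return, max_cpu_id is the highest online CPU; offline CPUs read ~0. */
    for ( i = 0; i <= info.max_cpu_id && i < NR_MAP_ENTRIES; i++ )
        printf("cpu%u -> node%u\n", i, cpu_to_node[i]);

    xc_interface_close(xc);
    return 0;
}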
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

