[Xen-changelog] [xen-unstable] ia64: sysctl build fixes.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1273141168 -3600
# Node ID 24a00bbe7cc0c1dd1293ac78f98b30399db64343
# Parent  8aff748807cbb340d2fd961f66ec60fe512d72c0
ia64: sysctl build fixes.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/ia64/xen/dom0_ops.c |   65 ++++++++++++++++++++++++++++++-------------
 1 files changed, 46 insertions(+), 19 deletions(-)
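As the diff below shows, the ia64 arch_do_sysctl() no longer fills a cpu_to_node
array from the XEN_SYSCTL_physinfo handler; that per-CPU data is now reported by
a separate XEN_SYSCTL_topologyinfo case covering cpu_to_core, cpu_to_socket and
cpu_to_node. For reference, here is a minimal caller-side sketch of the new
operation, assuming only Xen's public sysctl interface; issue_sysctl(), MAX_CPUS
and query_topology() are illustrative names, not part of this changeset, and a
real toolstack would go through libxc's sysctl path with hypercall-safe buffers
rather than plain static arrays.

/* Caller-side sketch of XEN_SYSCTL_topologyinfo (assumptions noted above). */
#define __XEN_TOOLS__                 /* public sysctl interface is tools-only */
#include <stdint.h>
#include <string.h>
#include <xen/sysctl.h>

#define MAX_CPUS 256                  /* arbitrary buffer size for this sketch */

extern int issue_sysctl(xen_sysctl_t *op);  /* hypothetical hypercall wrapper */

int query_topology(void)
{
    /* Simplification: real callers must use memory the hypervisor can
     * access (e.g. libxc hypercall buffers), not ordinary static arrays. */
    static uint32_t core[MAX_CPUS], socket[MAX_CPUS], node[MAX_CPUS];
    xen_sysctl_t op;

    memset(&op, 0, sizeof(op));
    op.cmd = XEN_SYSCTL_topologyinfo;
    op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;

    /* Highest array index we provide (0-based); the handler fills entries
     * up to min(this, highest online CPU) and returns the index of the
     * highest online CPU in max_cpu_index. */
    op.u.topologyinfo.max_cpu_index = MAX_CPUS - 1;
    set_xen_guest_handle(op.u.topologyinfo.cpu_to_core,   core);
    set_xen_guest_handle(op.u.topologyinfo.cpu_to_socket, socket);
    set_xen_guest_handle(op.u.topologyinfo.cpu_to_node,   node);

    if ( issue_sysctl(&op) )
        return -1;

    /* Offline CPUs are reported as ~0u in all three arrays. */
    return (int)op.u.topologyinfo.max_cpu_index;
}

Note that the topologyinfo handler copies only u.topologyinfo.max_cpu_index back
with copy_field_to_guest(), so the guest handles the caller passed in are left
untouched rather than being overwritten by a full-structure copy_to_guest() as
the old physinfo code did.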

diff -r 8aff748807cb -r 24a00bbe7cc0 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Thu May 06 11:16:37 2010 +0100
+++ b/xen/arch/ia64/xen/dom0_ops.c      Thu May 06 11:19:28 2010 +0100
@@ -735,20 +735,13 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
     {
     case XEN_SYSCTL_physinfo:
     {
-        int i;
-        uint32_t max_array_ent;
-        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
-
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
 
-        max_array_ent = pi->max_cpu_id;
-        cpu_to_node_arr = pi->cpu_to_node;
-
         memset(pi, 0, sizeof(*pi));
-        pi->cpu_to_node = cpu_to_node_arr;
         pi->threads_per_core = cpus_weight(per_cpu(cpu_sibling_map, 0));
         pi->cores_per_socket =
             cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
+        pi->nr_nodes         = (u32)num_online_nodes();
         pi->nr_cpus          = (u32)num_online_cpus();
         pi->total_pages      = total_pages; 
         pi->free_pages       = avail_domheap_pages();
@@ -757,21 +750,55 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
 
         pi->max_node_id = last_node(node_online_map);
         pi->max_cpu_id = last_cpu(cpu_online_map);
-        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
-
-        ret = 0;
-
-        if (!guest_handle_is_null(cpu_to_node_arr)) {
-            for (i = 0; i <= max_array_ent; i++) {
-                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if (copy_to_guest_offset(cpu_to_node_arr, i, &node, 1)) {
+
+        if ( copy_field_to_guest(u_sysctl, op, u.physinfo) )
+            ret = -EFAULT;
+    }
+    break;
+
+    case XEN_SYSCTL_topologyinfo:
+    {
+        xen_sysctl_topologyinfo_t *ti = &op->u.topologyinfo;
+        XEN_GUEST_HANDLE_64(uint32) arr;
+        uint32_t i, val, max_array_ent = ti->max_cpu_index;
+
+        ti->max_cpu_index = last_cpu(cpu_online_map);
+        max_array_ent = min(max_array_ent, ti->max_cpu_index);
+
+        arr = ti->cpu_to_core;
+        if ( !guest_handle_is_null(arr) )
+        {
+            for ( i = 0; ret == 0 && i <= max_array_ent; i++ )
+            {
+                val = cpu_online(i) ? cpu_to_core(i) : ~0u;
+                if ( copy_to_guest_offset(arr, i, &val, 1) )
                     ret = -EFAULT;
-                    break;
-                }
             }
         }
 
-        if ( copy_to_guest(u_sysctl, op, 1) )
+        arr = ti->cpu_to_socket;
+        if ( !guest_handle_is_null(arr) )
+        {
+            for ( i = 0; ret == 0 && i <= max_array_ent; i++ )
+            {
+                val = cpu_online(i) ? cpu_to_socket(i) : ~0u;
+                if ( copy_to_guest_offset(arr, i, &val, 1) )
+                    ret = -EFAULT;
+            }
+        }
+
+        arr = ti->cpu_to_node;
+        if ( !guest_handle_is_null(arr) )
+        {
+            for ( i = 0; ret == 0 && i <= max_array_ent; i++ )
+            {
+                val = cpu_online(i) ? cpu_to_node(i) : ~0u;
+                if ( copy_to_guest_offset(arr, i, &val, 1) )
+                    ret = -EFAULT;
+            }
+        }
+
+        if ( copy_field_to_guest(u_sysctl, op, u.topologyinfo.max_cpu_index) )
             ret = -EFAULT;
     }
     break;
