
[Xen-devel] [PATCH 2/4] xen: update for interface changes in the Xen 4.3 release.



New hypercalls:
- VKI_XENMEM_claim_pages
- VKI_XEN_DOMCTL_getnodeaffinity
- VKI_XEN_DOMCTL_setnodeaffinity

Plus placeholders for other new hypercalls which we don't yet support here.
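
For context, XENMEM_claim_pages reuses struct xen_memory_reservation,
which is why the PRE handler folds it into the existing reservation
cases. A minimal sketch of the call shape, assuming the Xen 4.3 public
headers (nr_pages, domid and the libxc wrapper named in the comment are
illustrative, not introduced by this patch):

    /* Only nr_extents (the number of pages being claimed) and domid
     * carry meaning; extent_start stays NULL, which is why the PRE
     * handler reads no extent array for this subop. */
    struct xen_memory_reservation r = {
        .nr_extents   = nr_pages,   /* pages to stake a claim on  */
        .extent_order = 0,          /* order 0, i.e. single pages */
        .mem_flags    = 0,
        .domid        = domid,
    };
    /* e.g. libxc's xc_domain_claim_pages(xch, domid, nr_pages),
     * which issues HYPERVISOR_memory_op(XENMEM_claim_pages, &r). */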

New revision of the sysctl and domctl interfaces, due to the new
outstanding_pages field in physinfo and dominfo.
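
The wrappers therefore have to dispatch on interface_version before
touching version-specific union members; condensed from the physinfo
POST handler in this patch:

    switch (sysctl->interface_version) {
    case 0x00000008:
    case 0x00000009:   /* physinfo layout unchanged from version 8 */
        POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
        break;
    case 0x0000000a:   /* Xen 4.3 inserts outstanding_pages */
        POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
        POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
        break;
    }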

Xen changed the API, but not the ABI, of cpumasks to become a more
generic bitmap. Switch to the new names.
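
The rename is mechanical since the layout is unchanged; for reference,
the new type and a sizing sketch (the VKI_BITMAP_BYTES macro is
illustrative only: Xen itself rounds a bitmap up to whole bytes, while
the hunks below keep the pre-existing nr_bits / 8 convention from the
cpumap code):

    struct vki_xenctl_bitmap {
        VKI_XEN_GUEST_HANDLE_64(vki_uint8) bitmap;
        vki_uint32_t nr_bits;  /* was nr_cpus in struct vki_xenctl_cpumap */
    };

    /* Bytes backing a map of nr_bits bits, rounded up as Xen does. */
    #define VKI_BITMAP_BYTES(nr_bits) (((nr_bits) + 7) / 8)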
---
 coregrind/m_syswrap/syswrap-xen.c |   92 +++++++++++++++++++++++++++++++------
 include/vki/vki-xen-domctl.h      |   42 ++++++++++++++++-
 include/vki/vki-xen-memory.h      |    1 +
 include/vki/vki-xen-sysctl.h      |   28 ++++++++++-
 include/vki/vki-xen.h             |    4 +-
 5 files changed, 144 insertions(+), 23 deletions(-)

diff --git a/coregrind/m_syswrap/syswrap-xen.c b/coregrind/m_syswrap/syswrap-xen.c
index be884a1..61aa1e1 100644
--- a/coregrind/m_syswrap/syswrap-xen.c
+++ b/coregrind/m_syswrap/syswrap-xen.c
@@ -104,7 +104,8 @@ PRE(memory_op)
    }
    case VKI_XENMEM_increase_reservation:
    case VKI_XENMEM_decrease_reservation:
-   case VKI_XENMEM_populate_physmap: {
+   case VKI_XENMEM_populate_physmap:
+   case VKI_XENMEM_claim_pages: {
       struct xen_memory_reservation *memory_reservation =
          (struct xen_memory_reservation *)ARG2;
       const HChar *which;
@@ -125,6 +126,9 @@ PRE(memory_op)
                       (Addr)memory_reservation->extent_start.p,
                       sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
          break;
+      case VKI_XENMEM_claim_pages:
+         which = "XENMEM_claim_pages";
+         break;
       default:
          which = "XENMEM_unknown";
          break;
@@ -354,6 +358,7 @@ PRE(sysctl) {
    {
    case 0x00000008:
    case 0x00000009:
+   case 0x0000000a:
           break;
    default:
       VG_(dmsg)("WARNING: sysctl version %"PRIx32" not supported\n",
@@ -470,6 +475,7 @@ PRE(domctl)
    {
    case 0x00000007:
    case 0x00000008:
+   case 0x00000009:
           break;
    default:
       VG_(dmsg)("WARNING: domctl version %"PRIx32" not supported\n",
@@ -567,7 +573,17 @@ PRE(domctl)
       __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
       PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
                    (Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
-                   domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+                   domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
+      break;
+
+   case VKI_XEN_DOMCTL_getnodeaffinity:
+      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
+      break;
+   case VKI_XEN_DOMCTL_setnodeaffinity:
+      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
+      PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.nodemap.bitmap",
+                   (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
+                   domctl->u.nodeaffinity.nodemap.nr_bits / 8);
       break;
 
    case VKI_XEN_DOMCTL_getvcpucontext:
@@ -640,6 +656,7 @@ POST(memory_op)
    switch (ARG1) {
    case VKI_XENMEM_set_memory_map:
    case VKI_XENMEM_decrease_reservation:
+   case VKI_XENMEM_claim_pages:
       /* No outputs */
       break;
    case VKI_XENMEM_increase_reservation:
@@ -743,6 +760,7 @@ POST(sysctl)
    {
    case 0x00000008:
    case 0x00000009:
+   case 0x0000000a:
           break;
    default:
       return;
@@ -787,18 +805,39 @@ POST(sysctl)
       break;
 
    case VKI_XEN_SYSCTL_physinfo:
-      POST_XEN_SYSCTL_WRITE(physinfo, threads_per_core);
-      POST_XEN_SYSCTL_WRITE(physinfo, cores_per_socket);
-      POST_XEN_SYSCTL_WRITE(physinfo, nr_cpus);
-      POST_XEN_SYSCTL_WRITE(physinfo, max_cpu_id);
-      POST_XEN_SYSCTL_WRITE(physinfo, nr_nodes);
-      POST_XEN_SYSCTL_WRITE(physinfo, max_node_id);
-      POST_XEN_SYSCTL_WRITE(physinfo, cpu_khz);
-      POST_XEN_SYSCTL_WRITE(physinfo, total_pages);
-      POST_XEN_SYSCTL_WRITE(physinfo, free_pages);
-      POST_XEN_SYSCTL_WRITE(physinfo, scrub_pages);
-      POST_XEN_SYSCTL_WRITE(physinfo, hw_cap[8]);
-      POST_XEN_SYSCTL_WRITE(physinfo, capabilities);
+      switch (sysctl->interface_version)
+      {
+      case 0x00000008:
+      case 0x00000009: /* Unchanged from version 8 */
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
+         POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
+         break;
+      case 0x0000000a:
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
+         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
+         break;
+      }
       break;
 
    case VKI_XEN_SYSCTL_topologyinfo:
@@ -834,6 +873,7 @@ POST(domctl){
    switch (domctl->interface_version) {
    case 0x00000007:
    case 0x00000008:
+   case 0x00000009:
           break;
    default:
           return;
@@ -855,6 +895,7 @@ POST(domctl){
    case VKI_XEN_DOMCTL_hypercall_init:
    case VKI_XEN_DOMCTL_setvcpuaffinity:
    case VKI_XEN_DOMCTL_setvcpucontext:
+   case VKI_XEN_DOMCTL_setnodeaffinity:
    case VKI_XEN_DOMCTL_set_cpuid:
    case VKI_XEN_DOMCTL_unpausedomain:
       /* No output fields */
@@ -908,7 +949,12 @@ POST(domctl){
 
    case VKI_XEN_DOMCTL_getvcpuaffinity:
       POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
-                     domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+                     domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
+      break;
+
+   case VKI_XEN_DOMCTL_getnodeaffinity:
+      POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
+                     domctl->u.nodeaffinity.nodemap.nr_bits / 8);
       break;
 
    case VKI_XEN_DOMCTL_getdomaininfo:
@@ -942,6 +988,22 @@ POST(domctl){
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
       break;
+      case 0x00000009:
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
+      break;
       }
       break;
    case VKI_XEN_DOMCTL_getvcpucontext:
diff --git a/include/vki/vki-xen-domctl.h b/include/vki/vki-xen-domctl.h
index 241c008..815e0a7 100644
--- a/include/vki/vki-xen-domctl.h
+++ b/include/vki/vki-xen-domctl.h
@@ -7,6 +7,7 @@
  *
  * - 00000007: Xen 4.1
  * - 00000008: Xen 4.2
+ * - 00000009: Xen 4.3
  *
  * When adding a new subop be sure to include the variants used by all
  * of the above, both here and in syswrap-xen.c
@@ -57,7 +58,7 @@
 #define VKI_XEN_DOMCTL_pin_mem_cacheattr             41
 #define VKI_XEN_DOMCTL_set_ext_vcpucontext           42
 #define VKI_XEN_DOMCTL_get_ext_vcpucontext           43
-#define VKI_XEN_DOMCTL_set_opt_feature               44
+#define VKI_XEN_DOMCTL_set_opt_feature               44 /* Obsolete, IA64 only */
 #define VKI_XEN_DOMCTL_test_assign_device            45
 #define VKI_XEN_DOMCTL_set_target                    46
 #define VKI_XEN_DOMCTL_deassign_device               47
@@ -80,6 +81,9 @@
 #define VKI_XEN_DOMCTL_set_access_required           64
 #define VKI_XEN_DOMCTL_audit_p2m                     65
 #define VKI_XEN_DOMCTL_set_virq_handler              66
+#define VKI_XEN_DOMCTL_set_broken_page_p2m           67
+#define VKI_XEN_DOMCTL_setnodeaffinity               68
+#define VKI_XEN_DOMCTL_getnodeaffinity               69
 #define VKI_XEN_DOMCTL_gdbsx_guestmemio            1000
 #define VKI_XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define VKI_XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -130,9 +134,39 @@ struct vki_xen_domctl_getdomaininfo_00000008 {
 typedef struct vki_xen_domctl_getdomaininfo_00000008 vki_xen_domctl_getdomaininfo_00000008_t;
 DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000008_t);
 
+struct vki_xen_domctl_getdomaininfo_00000009 {
+    /* OUT variables. */
+    vki_xen_domid_t  domain;
+    vki_uint32_t flags;
+    vki_xen_uint64_aligned_t tot_pages;
+    vki_xen_uint64_aligned_t max_pages;
+    vki_xen_uint64_aligned_t outstanding_pages;
+    vki_xen_uint64_aligned_t shr_pages;
+    vki_xen_uint64_aligned_t paged_pages;
+    vki_xen_uint64_aligned_t shared_info_frame;
+    vki_xen_uint64_aligned_t cpu_time;
+    vki_uint32_t nr_online_vcpus;
+    vki_uint32_t max_vcpu_id;
+    vki_uint32_t ssidref;
+    vki_xen_domain_handle_t handle;
+    vki_uint32_t cpupool;
+};
+typedef struct vki_xen_domctl_getdomaininfo_00000009 vki_xen_domctl_getdomaininfo_00000009_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000009_t);
+
+/* Get/set the NUMA node(s) with which the guest has affinity. */
+/* XEN_DOMCTL_setnodeaffinity */
+/* XEN_DOMCTL_getnodeaffinity */
+struct vki_xen_domctl_nodeaffinity {
+    struct vki_xenctl_bitmap nodemap;/* IN */
+};
+typedef struct vki_xen_domctl_nodeaffinity vki_xen_domctl_nodeaffinity_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_nodeaffinity_t);
+
+
 struct vki_xen_domctl_vcpuaffinity {
     vki_uint32_t  vcpu;              /* IN */
-    struct vki_xenctl_cpumap cpumap; /* IN/OUT */
+    struct vki_xenctl_bitmap cpumap; /* IN/OUT */
 };
 
 struct vki_xen_domctl_max_mem {
@@ -233,10 +267,12 @@ struct vki_xen_domctl {
         struct vki_xen_domctl_createdomain      createdomain;
         struct vki_xen_domctl_getdomaininfo_00000007 getdomaininfo_00000007;
         struct vki_xen_domctl_getdomaininfo_00000008 getdomaininfo_00000008;
+        struct vki_xen_domctl_getdomaininfo_00000009 getdomaininfo_00000009;
         //struct vki_xen_domctl_getmemlist        getmemlist;
         //struct vki_xen_domctl_getpageframeinfo  getpageframeinfo;
         //struct vki_xen_domctl_getpageframeinfo2 getpageframeinfo2;
         //struct vki_xen_domctl_getpageframeinfo3 getpageframeinfo3;
+        struct vki_xen_domctl_nodeaffinity      nodeaffinity;
         struct vki_xen_domctl_vcpuaffinity      vcpuaffinity;
         //struct vki_xen_domctl_shadow_op         shadow_op;
         struct vki_xen_domctl_max_mem           max_mem;
@@ -266,7 +302,6 @@ struct vki_xen_domctl {
         //struct vki_xen_domctl_ioport_mapping    ioport_mapping;
         //struct vki_xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
         //struct vki_xen_domctl_ext_vcpucontext   ext_vcpucontext;
-        //struct vki_xen_domctl_set_opt_feature   set_opt_feature;
         //struct vki_xen_domctl_set_target        set_target;
         //struct vki_xen_domctl_subscribe         subscribe;
         //struct vki_xen_domctl_debug_op          debug_op;
@@ -280,6 +315,7 @@ struct vki_xen_domctl {
         //struct vki_xen_domctl_audit_p2m         audit_p2m;
         //struct vki_xen_domctl_set_virq_handler  set_virq_handler;
         //struct vki_xen_domctl_gdbsx_memio       gdbsx_guest_memio;
+        //struct vki_xen_domctl_set_broken_page_p2m set_broken_page_p2m;
         //struct vki_xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         //struct vki_xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
         vki_uint8_t                         pad[128];
diff --git a/include/vki/vki-xen-memory.h b/include/vki/vki-xen-memory.h
index 7de8d33..eac7871 100644
--- a/include/vki/vki-xen-memory.h
+++ b/include/vki/vki-xen-memory.h
@@ -20,6 +20,7 @@
 #define VKI_XENMEM_get_pod_target       17
 #define VKI_XENMEM_get_sharing_freed_pages    18
 #define VKI_XENMEM_get_sharing_shared_pages   19
+#define VKI_XENMEM_claim_pages                24
 
 struct vki_xen_memory_map {
     unsigned int nr_entries;
diff --git a/include/vki/vki-xen-sysctl.h b/include/vki/vki-xen-sysctl.h
index c5178d7..32c8722 100644
--- a/include/vki/vki-xen-sysctl.h
+++ b/include/vki/vki-xen-sysctl.h
@@ -7,6 +7,7 @@
  *
  * - 00000008: Xen 4.1
  * - 00000009: Xen 4.2
+ * - 0000000a: Xen 4.3
  *
  * When adding a new subop be sure to include the variants used by all
  * of the above, both here and in syswrap-xen.c
@@ -35,6 +36,7 @@
 #define VKI_XEN_SYSCTL_numainfo                      17
 #define VKI_XEN_SYSCTL_cpupool_op                    18
 #define VKI_XEN_SYSCTL_scheduler_op                  19
+#define VKI_XEN_SYSCTL_coverage_op                   20
 
 struct vki_xen_sysctl_getdomaininfolist_00000008 {
     /* IN variables. */
@@ -69,7 +71,7 @@ struct vki_xen_sysctl_cpupool_op {
     vki_uint32_t domid;       /* IN: M              */
     vki_uint32_t cpu;         /* IN: AR             */
     vki_uint32_t n_dom;       /*            OUT: I  */
-    struct vki_xenctl_cpumap cpumap; /*     OUT: IF */
+    struct vki_xenctl_bitmap cpumap; /*     OUT: IF */
 };
 
 struct vki_xen_sysctl_topologyinfo {
@@ -85,7 +87,7 @@ struct vki_xen_sysctl_numainfo {
     VKI_XEN_GUEST_HANDLE_64(vki_uint64) node_to_memfree;
     VKI_XEN_GUEST_HANDLE_64(vki_uint32) node_to_node_distance;
 };
-struct vki_xen_sysctl_physinfo {
+struct vki_xen_sysctl_physinfo_00000008 {
     vki_uint32_t threads_per_core;
     vki_uint32_t cores_per_socket;
     vki_uint32_t nr_cpus;     /* # CPUs currently online */
@@ -101,13 +103,31 @@ struct vki_xen_sysctl_physinfo {
     vki_uint32_t capabilities;
 };
 
+struct vki_xen_sysctl_physinfo_0000000a {
+    vki_uint32_t threads_per_core;
+    vki_uint32_t cores_per_socket;
+    vki_uint32_t nr_cpus;     /* # CPUs currently online */
+    vki_uint32_t max_cpu_id;  /* Largest possible CPU ID on this host */
+    vki_uint32_t nr_nodes;    /* # nodes currently online */
+    vki_uint32_t max_node_id; /* Largest possible node ID on this host */
+    vki_uint32_t cpu_khz;
+    vki_xen_uint64_aligned_t total_pages;
+    vki_xen_uint64_aligned_t free_pages;
+    vki_xen_uint64_aligned_t scrub_pages;
+    vki_xen_uint64_aligned_t outstanding_pages;
+    vki_uint32_t hw_cap[8];
+
+    vki_uint32_t capabilities;
+};
+
 struct vki_xen_sysctl {
     vki_uint32_t cmd;
     vki_uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
     union {
         //struct vki_xen_sysctl_readconsole       readconsole;
         //struct vki_xen_sysctl_tbuf_op           tbuf_op;
-        struct vki_xen_sysctl_physinfo          physinfo;
+        struct vki_xen_sysctl_physinfo_00000008 physinfo_00000008;
+        struct vki_xen_sysctl_physinfo_0000000a physinfo_0000000a;
         struct vki_xen_sysctl_topologyinfo      topologyinfo;
         struct vki_xen_sysctl_numainfo          numainfo;
         //struct vki_xen_sysctl_sched_id          sched_id;
@@ -124,6 +144,8 @@ struct vki_xen_sysctl {
         //struct vki_xen_sysctl_lockprof_op       lockprof_op;
         struct vki_xen_sysctl_cpupool_op        cpupool_op;
         //struct vki_xen_sysctl_scheduler_op      scheduler_op;
+        //struct vki_xen_sysctl_coverage_op       coverage_op;
+
         vki_uint8_t                             pad[128];
     } u;
 };
diff --git a/include/vki/vki-xen.h b/include/vki/vki-xen.h
index ed3cc1b..87fbb4f 100644
--- a/include/vki/vki-xen.h
+++ b/include/vki/vki-xen.h
@@ -71,9 +71,9 @@ __DEFINE_VKI_XEN_GUEST_HANDLE(vki_uint16, vki_uint16_t);
 __DEFINE_VKI_XEN_GUEST_HANDLE(vki_uint32, vki_uint32_t);
 __DEFINE_VKI_XEN_GUEST_HANDLE(vki_uint64, vki_uint64_t);
 
-struct vki_xenctl_cpumap {
+struct vki_xenctl_bitmap {
     VKI_XEN_GUEST_HANDLE_64(vki_uint8) bitmap;
-    vki_uint32_t nr_cpus;
+    vki_uint32_t nr_bits;
 };
 
 #include <vki/vki-xen-domctl.h>
-- 
1.7.2.5

