
[Xen-changelog] [xen-unstable] Pack some hvmop memory structures better



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1297072691 0
# Node ID 77d05af7dc78c76d393b4bff8d93c0e42ee72ad4
# Parent  aeb6ffcbb91796b71be1a3cf7fbe25ad99437c05
Pack some hvmop memory structures better

Some of the hvmop memory structures have a shocking amount of
unnecessary padding in them.  Fields which can take only a few valid
values are given 64 bits of storage and then 8-byte aligned, so extra
padding ends up around them.

This patch resizes and reorders the fields as follows, (hopefully)
without introducing any difference between the 32- and 64-bit layouts
(a standalone layout sketch follows the field summary below).

xen_hvm_set_mem_type:
 hvmmem_type -> 16 bits
 nr -> 32 bits (limiting us to setting 16TB at a time)

xen_hvm_set_mem_access:
 hvmmem_access -> 16 bits
 nr -> 32 bits

xen_hvm_get_mem_access:
 hvmmem_access -> 16 bits

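As a sanity check on the new layout (not part of the changeset), a
minimal standalone sketch of the repacked xen_hvm_set_mem_access:
domid_t is a uint16_t in the public headers, and the aligned attribute
below stands in for uint64_aligned_t, which keeps 8-byte alignment even
on 32-bit x86.  The structure comes out at 16 bytes with no padding on
both 32- and 64-bit builds:

#include <stdint.h>
#include <stddef.h>

/* Approximation of the repacked xen_hvm_set_mem_access layout. */
struct set_mem_access_sketch {
    uint16_t domid;                       /* domid_t */
    uint16_t hvmmem_access;               /* hvm_access_t, now 16 bits */
    uint32_t nr;                          /* number of pages, now 32 bits */
    uint64_t first_pfn __attribute__((aligned(8)));  /* uint64_aligned_t */
};

/* Same offsets and size whether built -m32 or -m64. */
_Static_assert(offsetof(struct set_mem_access_sketch, hvmmem_access) == 2, "");
_Static_assert(offsetof(struct set_mem_access_sketch, nr) == 4, "");
_Static_assert(offsetof(struct set_mem_access_sketch, first_pfn) == 8, "");
_Static_assert(sizeof(struct set_mem_access_sketch) == 16, "");
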
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/include/public/hvm/hvm_op.h |   22 ++++++++++------------
 1 files changed, 10 insertions(+), 12 deletions(-)

diff -r aeb6ffcbb917 -r 77d05af7dc78 xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h   Mon Feb 07 09:52:11 2011 +0000
+++ b/xen/include/public/hvm/hvm_op.h   Mon Feb 07 09:58:11 2011 +0000
@@ -119,11 +119,11 @@ struct xen_hvm_set_mem_type {
     /* Domain to be updated. */
     domid_t domid;
     /* Memory type */
-    uint64_aligned_t hvmmem_type;
+    uint16_t hvmmem_type;
+    /* Number of pages. */
+    uint32_t nr;
     /* First pfn. */
     uint64_aligned_t first_pfn;
-    /* Number of pages. */
-    uint64_aligned_t nr;
 };
 typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
@@ -179,13 +179,12 @@ struct xen_hvm_set_mem_access {
 struct xen_hvm_set_mem_access {
     /* Domain to be updated. */
     domid_t domid;
-    uint16_t pad[3]; /* align next field on 8-byte boundary */
     /* Memory type */
-    uint64_t hvmmem_access; /* hvm_access_t */
+    uint16_t hvmmem_access; /* hvm_access_t */
+    /* Number of pages, ignored on setting default access */
+    uint32_t nr;
     /* First pfn, or ~0ull to set the default access for new pages */
-    uint64_t first_pfn;
-    /* Number of pages, ignored on setting default access */
-    uint64_t nr;
+    uint64_aligned_t first_pfn;
 };
 typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
@@ -195,11 +194,10 @@ struct xen_hvm_get_mem_access {
 struct xen_hvm_get_mem_access {
     /* Domain to be queried. */
     domid_t domid;
-    uint16_t pad[3]; /* align next field on 8-byte boundary */
     /* Memory type: OUT */
-    uint64_t hvmmem_access; /* hvm_access_t */
+    uint16_t hvmmem_access; /* hvm_access_t */
     /* pfn, or ~0ull for default access for new pages.  IN */
-    uint64_t pfn;
+    uint64_aligned_t pfn;
 };
 typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
@@ -220,7 +218,7 @@ struct xen_hvm_inject_trap {
     /* Error code, or -1 to skip */
     uint32_t error_code;
     /* CR2 for page faults */
-    uint64_t cr2;
+    uint64_aligned_t cr2;
 };
 typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
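
As a usage illustration (again, not part of the changeset), a caller
filling the repacked xen_hvm_set_mem_type; HVMOP_set_mem_type and
HVMMEM_ram_rw come from hvm_op.h, while hvm_op() is only a stand-in
for whatever hypercall wrapper the caller has (e.g. a
HYPERVISOR_hvm_op-style macro in a kernel, or a libxc helper):

/* Mark one guest page as ordinary read-write RAM. */
static int set_page_ram_rw(domid_t domid, uint64_t pfn)
{
    struct xen_hvm_set_mem_type arg = {
        .domid       = domid,
        .hvmmem_type = HVMMEM_ram_rw,  /* fits the new 16-bit field */
        .nr          = 1,              /* page count, new 32-bit field */
        .first_pfn   = pfn,
    };

    return hvm_op(HVMOP_set_mem_type, &arg);  /* hypothetical wrapper */
}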
