[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] RE: compat tool problem with new tmem save/restore tmem_op struct



> Agree.  I'll continue debugging with the flattened
> structure and then switch back to the nested union
> if/when you are able to fix the tool.  Switching
> back will hopefully be syntactic only with search/replace.

Urk... now with the flattened (single-level union)
structure, the accessors generated for my
ctrl_save structure seem to be getting garbage.
Looking in hex at the pointers that are getting
to the hypervisor, it appears that padding and/or
sizing (and thus the offsets?) of the consecutive
tmem_cli_va_t struct components are wrong
(with 32-bit dom0 and 64-bit hyp).

So I am basically dead in the water with the
tmem save/restore.  Relevant patch chunks
below.  This stuff sure isn't very easy or
intuitive!  I know I could rewrite it to work,
avoiding compat/handle stuff entirely, but would
prefer to use the compat/handle mechanism if
possible.

If you see anything wrong, please let me know!

Thanks,
Dan
=========================

diff -r 5333e6497af6 xen/include/public/tmem.h
--- a/xen/include/public/tmem.h Mon Jul 20 15:45:50 2009 +0100
+++ b/xen/include/public/tmem.h Thu Jul 30 16:33:37 2009 -0600
@@ -70,19 +92,12 @@ typedef XEN_GUEST_HANDLE(char) tmem_cli_
 typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t;
 struct tmem_op {
     uint32_t cmd;
-    int32_t pool_id; /* private > 0; shared < 0; 0 is invalid */
+    int32_t pool_id;
     union {
         struct {  /* for cmd == TMEM_NEW_POOL */
             uint64_t uuid[2];
             uint32_t flags;
         } new;
-        struct {  /* for cmd == TMEM_CONTROL */
-            uint32_t subop;
-            uint32_t cli_id;
-            uint32_t arg1;
-            uint32_t arg2;
-            tmem_cli_va_t buf;
-        } ctrl;
         struct {
             uint64_t object;
             uint32_t index;
@@ -91,6 +106,36 @@ struct tmem_op {
             uint32_t len;
             tmem_cli_mfn_t cmfn; /* client machine page frame */
         } gen;
+        struct {  /* for cmd == TMEM_CONTROL */
+            uint32_t subop; /* must be first */
+            uint32_t cli_id;
+            uint32_t arg1;
+            uint32_t arg2;
+            tmem_cli_va_t buf;
+        } ctrl_gen;
+        struct {
+            uint32_t subop; /* must be first */
+            uint32_t cli_id;
+            uint64_t uuid[2];
+            uint32_t flags;
+        } ctrl_auth; /* also used for restore new */
+        struct {
+            uint32_t subop; /* must be first */
+            uint32_t cli_id;
+            /* NOTE(review): tmem_cli_va_t is XEN_GUEST_HANDLE(char); its size
+             * (and therefore the offsets of the fields below) differs between
+             * a 32-bit compat guest and a 64-bit hypervisor.  Verify the
+             * generated xlat padding for these consecutive handles — this is
+             * the likely source of the garbage offsets reported above. */
+            tmem_cli_va_t p_pool_id;
+            tmem_cli_va_t p_oid;
+            tmem_cli_va_t p_index;
+            tmem_cli_va_t buf;
+            uint32_t bufsize;
+        } ctrl_save;
+        struct {
+            uint32_t subop; /* must be first */
+            uint32_t cli_id;
+            uint64_t oid;
+            uint32_t index;
+            tmem_cli_va_t buf;
+            uint32_t bufsize;
+        } ctrl_restore;
     } u;
 };
 typedef struct tmem_op tmem_op_t;
diff -r 5333e6497af6 xen/include/xen/tmem_xen.h
--- a/xen/include/xen/tmem_xen.h        Mon Jul 20 15:45:50 2009 +0100
+++ b/xen/include/xen/tmem_xen.h        Thu Jul 30 16:33:37 2009 -0600
@@ -302,13 +309,44 @@ static inline int tmh_get_tmemop_from_cl
         /* Choose the XLAT union member matching the command (and, for
          * TMEM_CONTROL, the subop — safe to read via ctrl_gen because
          * subop is the first field of every ctrl_* sub-struct). */
         switch ( cop.cmd )
         {
         case TMEM_NEW_POOL: u = XLAT_tmem_op_u_new;  break;
-        case TMEM_CONTROL:  u = XLAT_tmem_op_u_ctrl; break;
+        case TMEM_CONTROL:
+            switch ( cop.u.ctrl_gen.subop )
+            {
+            case TMEMC_SHARED_POOL_DEAUTH:
+            case TMEMC_SHARED_POOL_AUTH:
+            case TMEMC_RESTORE_NEW_POOL:
+                u = XLAT_tmem_op_u_ctrl_auth;
+                break;
+            case TMEMC_SAVE_GET_NEXT_PAGE:
+            case TMEMC_SAVE_GET_NEXT_INV:
+                u = XLAT_tmem_op_u_ctrl_save;
+                break; /* BUG FIX: was falling through and clobbering u
+                        * with XLAT_tmem_op_u_ctrl_gen below, so the save
+                        * handles were never translated. */
+            case TMEMC_RESTORE_PUT_PAGE:
+            case TMEMC_RESTORE_FLUSH_PAGE:
+                u = XLAT_tmem_op_u_ctrl_restore;
+                break; /* BUG FIX: same missing break as above. */
+            default:
+                u = XLAT_tmem_op_u_ctrl_gen;
+                break;
+            }                                        break;
         default:            u = XLAT_tmem_op_u_gen;  break;
         }
-#define XLAT_tmem_op_HNDL_u_ctrl_buf(_d_, _s_) \
-        guest_from_compat_handle((_d_)->u.ctrl.buf, (_s_)->u.ctrl.buf)
+/* Per-field handle translators invoked by the generated XLAT_tmem_op().
+ * NOTE(review): the original mail wrapped these defines mid-line without
+ * backslash continuations, which would not compile; rejoined here. */
+#define XLAT_tmem_op_HNDL_u_ctrl_gen_buf(_d_, _s_) \
+        guest_from_compat_handle((_d_)->u.ctrl_gen.buf, (_s_)->u.ctrl_gen.buf)
+#define XLAT_tmem_op_HNDL_u_ctrl_save_p_pool_id(_d_, _s_) \
+        guest_from_compat_handle((_d_)->u.ctrl_save.p_pool_id, \
+                                 (_s_)->u.ctrl_save.p_pool_id)
+#define XLAT_tmem_op_HNDL_u_ctrl_save_p_oid(_d_, _s_) \
+        guest_from_compat_handle((_d_)->u.ctrl_save.p_oid, \
+                                 (_s_)->u.ctrl_save.p_oid)
+#define XLAT_tmem_op_HNDL_u_ctrl_save_p_index(_d_, _s_) \
+        guest_from_compat_handle((_d_)->u.ctrl_save.p_index, \
+                                 (_s_)->u.ctrl_save.p_index)
+#define XLAT_tmem_op_HNDL_u_ctrl_save_buf(_d_, _s_) \
+        guest_from_compat_handle((_d_)->u.ctrl_save.buf, \
+                                 (_s_)->u.ctrl_save.buf)
+#define XLAT_tmem_op_HNDL_u_ctrl_restore_buf(_d_, _s_) \
+        guest_from_compat_handle((_d_)->u.ctrl_restore.buf, \
+                                 (_s_)->u.ctrl_restore.buf)
         XLAT_tmem_op(op, &cop);
-#undef XLAT_tmem_op_HNDL_u_ctrl_buf
+#undef XLAT_tmem_op_HNDL_u_ctrl_gen_buf
+#undef XLAT_tmem_op_HNDL_u_ctrl_save_p_pool_id
+#undef XLAT_tmem_op_HNDL_u_ctrl_save_p_oid
+#undef XLAT_tmem_op_HNDL_u_ctrl_save_p_index
+#undef XLAT_tmem_op_HNDL_u_ctrl_save_buf
+#undef XLAT_tmem_op_HNDL_u_ctrl_restore_buf
         return 0;
     }
 #endif

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.