
[Xen-devel] [PATCH,RFC 10/17] 32-on-64 mmu ops
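This patch wires up the MMU hypercalls for 32-on-64 (compat) guests. The compat
hypercall table in entry.S no longer routes mmu_update and mmuext_op to
domain_crash_synchronous: mmu_update needs no argument translation (its structure
layout is merely checked via xlat.lst and xlat.c), so its slot points straight at
do_mmu_update(), while mmuext_op gains a new compat_mmuext_op() wrapper in
x86_64/compat/mm.c. The wrapper copies batches of 32-bit mmuext_op structures into
the per-vCPU argument translation area, widens them with the generated
XLAT_mmuext_op() helper, and forwards them to do_mmuext_op(), preserving preemption
and continuation behaviour. Operations meaningless for a 3-level compat guest
(MMUEXT_PIN_L4_TABLE, MMUEXT_NEW_USER_BASEPTR) are rejected both in the wrapper and
in do_mmuext_op() itself, and MMU_UPDATE_PREEMPTED moves from mm.c to
asm-x86/hypercall.h so the wrapper can share it.

As background, here is a minimal standalone sketch (plain C, not Xen code) of the
@count encoding that the relocated MMU_UPDATE_PREEMPTED comment describes: the most
significant bit marks a resumed invocation and is masked off to recover the real
operation count.

#include <stdio.h>

/* Same definition as in the patch: only the m.s.b. of an unsigned int is set. */
#define MMU_UPDATE_PREEMPTED (~(~0U >> 1))

static void handle(unsigned int count)
{
    int resumed = (count & MMU_UPDATE_PREEMPTED) != 0;

    count &= ~MMU_UPDATE_PREEMPTED;      /* recover the real op count */
    printf("count=%u resumed=%d\n", count, resumed);
}

int main(void)
{
    handle(16);                          /* fresh call */
    handle(16 | MMU_UPDATE_PREEMPTED);   /* resumed continuation */
    return 0;
}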



Index: 2006-10-04/xen/arch/x86/mm.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/mm.c   2006-10-04 15:18:51.000000000 +0200
+++ 2006-10-04/xen/arch/x86/mm.c        2006-10-04 15:18:57.000000000 +0200
@@ -106,6 +106,7 @@
 #include <asm/ldt.h>
 #include <asm/x86_emulate.h>
 #include <asm/e820.h>
+#include <asm/hypercall.h>
 #include <public/memory.h>
 
 #ifdef VERBOSE
@@ -125,13 +126,6 @@
 #define PTE_UPDATE_WITH_CMPXCHG
 #endif
 
-/*
- * Both do_mmuext_op() and do_mmu_update():
- * We steal the m.s.b. of the @count parameter to indicate whether this
- * invocation of do_mmu_update() is resuming a previously preempted call.
- */
-#define MMU_UPDATE_PREEMPTED          (~(~0U>>1))
-
 /* Used to defer flushing of memory structures. */
 struct percpu_mm_info {
 #define DOP_FLUSH_TLB      (1<<0) /* Flush the local TLB.                    */
@@ -2011,6 +2005,8 @@ int do_mmuext_op(
 
         case MMUEXT_PIN_L4_TABLE:
             type = PGT_l4_page_table;
+            if ( IS_COMPAT(FOREIGNDOM) )
+                break;
 
         pin_page:
             /* Ignore pinning of invalid paging levels. */
@@ -2073,7 +2069,11 @@ int do_mmuext_op(
         
 #ifdef __x86_64__
         case MMUEXT_NEW_USER_BASEPTR:
-            okay = 1;
+            if ( IS_COMPAT(FOREIGNDOM) )
+            {
+                okay = 0;
+                break;
+            }
             if (likely(mfn != 0))
             {
                 if ( shadow_mode_refcounts(d) )
Index: 2006-10-04/xen/arch/x86/x86_64/compat/entry.S
===================================================================
--- 2006-10-04.orig/xen/arch/x86/x86_64/compat/entry.S  2006-10-04 15:18:51.000000000 +0200
+++ 2006-10-04/xen/arch/x86/x86_64/compat/entry.S       2006-10-04 15:18:57.000000000 +0200
@@ -279,7 +279,6 @@ CFIX14:
 .section .rodata, "a", @progbits
 
 #define compat_set_trap_table domain_crash_synchronous
-#define compat_mmu_update domain_crash_synchronous
 #define compat_set_gdt domain_crash_synchronous
 #define compat_platform_op domain_crash_synchronous
 #define compat_multicall domain_crash_synchronous
@@ -288,7 +287,6 @@ CFIX14:
 #define compat_physdev_op_compat domain_crash_synchronous
 #define compat_grant_table_op domain_crash_synchronous
 #define compat_vcpu_op domain_crash_synchronous
-#define compat_mmuext_op domain_crash_synchronous
 #define compat_acm_op domain_crash_synchronous
 #define compat_arch_sched_op domain_crash_synchronous
 #define compat_xenoprof_op domain_crash_synchronous
@@ -299,7 +297,7 @@ CFIX14:
 
 ENTRY(compat_hypercall_table)
         .quad compat_set_trap_table     /*  0 */
-        .quad compat_mmu_update
+        .quad do_mmu_update
         .quad compat_set_gdt
         .quad do_stack_switch
         .quad compat_set_callbacks
Index: 2006-10-04/xen/arch/x86/x86_64/compat/mm.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/x86_64/compat/mm.c     2006-10-04 15:18:51.000000000 +0200
+++ 2006-10-04/xen/arch/x86/x86_64/compat/mm.c  2006-10-04 15:18:57.000000000 +0200
@@ -1,6 +1,8 @@
 #ifdef CONFIG_COMPAT
 
+#include <xen/event.h>
 #include <compat/memory.h>
+#include <compat/xen.h>
 
 int compat_update_descriptor(u32 pa_lo, u32 pa_hi, u32 desc_lo, u32 desc_hi)
 {
@@ -115,6 +117,151 @@ int compat_update_va_mapping_otherdomain
 {
     return do_update_va_mapping_otherdomain(va, lo | ((u64)hi << 32), flags, domid);
 }
+
+DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t);
+
+int compat_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_compat_t) cmp_uops,
+                     unsigned int count,
+                     XEN_GUEST_HANDLE(uint) pdone,
+                     unsigned int foreigndom)
+{
+    unsigned int i, preempt_mask;
+    int rc = 0;
+    XEN_GUEST_HANDLE(mmuext_op_t) nat_ops;
+
+    preempt_mask = count & MMU_UPDATE_PREEMPTED;
+    count ^= preempt_mask;
+
+    if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
+        return -EFAULT;
+
+    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+
+    for ( ; count; count -= i )
+    {
+        mmuext_op_t *nat_op = nat_ops.p;
+        unsigned int limit;
+        int err;
+
+        if ( hypercall_preempt_check() )
+        {
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_mmuext_op, "hihi",
+                cmp_uops, count | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
+            break;
+        }
+
+        limit = COMPAT_ARG_XLAT_SIZE / sizeof(*nat_op);
+
+        for ( i = 0; i < min(limit, count); ++i )
+        {
+            mmuext_op_compat_t cmp_op;
+            enum XLAT_mmuext_op_arg1 arg1;
+            enum XLAT_mmuext_op_arg2 arg2;
+
+            if ( unlikely(__copy_from_guest(&cmp_op, cmp_uops, 1) != 0) )
+            {
+                rc = -EFAULT;
+                break;
+            }
+
+            switch ( cmp_op.cmd )
+            {
+            case MMUEXT_PIN_L1_TABLE:
+            case MMUEXT_PIN_L2_TABLE:
+            case MMUEXT_PIN_L3_TABLE:
+            case MMUEXT_UNPIN_TABLE:
+            case MMUEXT_NEW_BASEPTR:
+                arg1 = XLAT_mmuext_op_arg1_mfn;
+                break;
+            default:
+                arg1 = XLAT_mmuext_op_arg1_linear_addr;
+                break;
+            case MMUEXT_PIN_L4_TABLE:
+            case MMUEXT_NEW_USER_BASEPTR:
+                rc = -EINVAL;
+            case MMUEXT_TLB_FLUSH_LOCAL:
+            case MMUEXT_TLB_FLUSH_MULTI:
+            case MMUEXT_TLB_FLUSH_ALL:
+            case MMUEXT_FLUSH_CACHE:
+                arg1 = -1;
+                break;
+            }
+
+            if ( rc )
+                break;
+
+            switch ( cmp_op.cmd )
+            {
+            case MMUEXT_SET_LDT:
+                arg2 = XLAT_mmuext_op_arg2_nr_ents;
+                break;
+            case MMUEXT_TLB_FLUSH_MULTI:
+            case MMUEXT_INVLPG_MULTI:
+                arg2 = XLAT_mmuext_op_arg2_vcpumask;
+                break;
+            default:
+                arg2 = -1;
+                break;
+            }
+
+#define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \
+            do \
+            { \
+                unsigned int vcpumask; \
+                if ( --limit == i ) \
+                { \
+                    --i; \
+                    continue; \
+                } \
+                (_d_)->arg2.vcpumask.p = (void *)(nat_ops.p + limit); \
+                if ( copy_from_compat(&vcpumask, (_s_)->arg2.vcpumask, 1) == 0 ) \
+                    *(_d_)->arg2.vcpumask.p = vcpumask; \
+                else \
+                    rc = -EFAULT; \
+            } while(0)
+            XLAT_mmuext_op(nat_op, &cmp_op);
+#undef XLAT_mmuext_op_HNDL_arg2_vcpumask
+
+            if ( rc )
+                break;
+
+            guest_handle_add_offset(cmp_uops, 1);
+            ++nat_op;
+        }
+
+        err = do_mmuext_op(nat_ops, i | preempt_mask, pdone, foreigndom);
+
+        if ( err )
+        {
+            unsigned int done = err & ~MMU_UPDATE_PREEMPTED;
+
+            if ( done < i )
+            {
+                /*
+                 * Strictly speaking this conditional is unnecessary: we are
+                 * about to leave the function anyway, so on error nobody
+                 * will dereference the request handles again; only an
+                 * active continuation makes further use of the
+                 * compatibility handle.
+                 */
+                guest_handle_add_offset(nat_ops, done);
+                guest_handle_add_offset(cmp_uops, (long)(done - i));
+                hypercall_xlat_continuation(0x01, nat_ops, cmp_uops);
+            }
+            rc = err;
+        }
+
+        if ( rc )
+            break;
+
+        /* Force do_mmuext_op() to not start counting from zero again. */
+        preempt_mask = MMU_UPDATE_PREEMPTED;
+    }
+
+    return rc;
+}
+
 #endif /* CONFIG_COMPAT */
 
 /*
Index: 2006-10-04/xen/common/compat/xlat.c
===================================================================
--- 2006-10-04.orig/xen/common/compat/xlat.c    2006-10-04 15:11:03.000000000 +0200
+++ 2006-10-04/xen/common/compat/xlat.c 2006-10-04 15:18:57.000000000 +0200
@@ -21,6 +21,10 @@ void xlat_start_info(struct start_info *
 CHECK_dom0_vga_console_info
 #undef dom0_vga_console_info
 
+#define xen_mmu_update mmu_update
+CHECK_mmu_update
+#undef xen_mmu_update
+
 #define xen_vcpu_time_info vcpu_time_info
 CHECK_vcpu_time_info
 #undef xen_vcpu_time_info
Index: 2006-10-04/xen/include/asm-x86/hypercall.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/hypercall.h     2006-05-02 16:41:35.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/hypercall.h  2006-10-04 15:18:57.000000000 +0200
@@ -7,6 +7,13 @@
 
 #include <public/physdev.h>
 
+/*
+ * Both do_mmuext_op() and do_mmu_update():
+ * We steal the m.s.b. of the @count parameter to indicate whether this
+ * invocation of do_mmu_update() is resuming a previously preempted call.
+ */
+#define MMU_UPDATE_PREEMPTED          (~(~0U>>1))
+
 extern long
 do_event_channel_op_compat(
     XEN_GUEST_HANDLE(evtchn_op_t) uop);
Index: 2006-10-04/xen/include/xlat.lst
===================================================================
--- 2006-10-04.orig/xen/include/xlat.lst        2006-10-04 15:18:51.000000000 +0200
+++ 2006-10-04/xen/include/xlat.lst     2006-10-04 15:18:57.000000000 +0200
@@ -2,6 +2,8 @@
 # ! - needs translation
 # ? - needs checking
 ?      dom0_vga_console_info           xen.h
+?      mmu_update                      xen.h
+!      mmuext_op                       xen.h
 !      start_info                      xen.h
 ?      vcpu_time_info                  xen.h
 !      add_to_physmap                  memory.h
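
For readers unfamiliar with the argument translation area, the following standalone
sketch illustrates the batching pattern compat_mmuext_op() uses: translate bounded
batches of 32-bit operations into a native buffer, then hand each batch to the
native handler. The types and names here are hypothetical simplifications; the real
code works through guest handles, copy_from_guest and the generated
XLAT_mmuext_op() helper.

#include <stdio.h>

/* Simplified stand-ins for mmuext_op_compat_t and mmuext_op_t. */
struct compat_op { unsigned int cmd; unsigned int arg; };
struct native_op { unsigned int cmd; unsigned long arg; };

/* Stands in for COMPAT_ARG_XLAT_SIZE / sizeof(mmuext_op_t). */
#define XLAT_BUF_OPS 64

/* Stand-in for the native do_mmuext_op() handler. */
static int native_handler(const struct native_op *ops, unsigned int n)
{
    (void)ops;
    (void)n;
    return 0;
}

static int compat_handler(const struct compat_op *src, unsigned int count)
{
    static struct native_op buf[XLAT_BUF_OPS];

    while (count) {
        unsigned int i;
        unsigned int batch = count < XLAT_BUF_OPS ? count : XLAT_BUF_OPS;

        /* Widen each 32-bit op into the translation buffer. */
        for (i = 0; i < batch; i++) {
            buf[i].cmd = src[i].cmd;
            buf[i].arg = src[i].arg;
        }

        /* Hand the translated batch to the native handler. */
        if (native_handler(buf, batch) != 0)
            return -1;

        src += batch;
        count -= batch;
    }
    return 0;
}

int main(void)
{
    struct compat_op ops[3] = { { 0, 1 }, { 1, 2 }, { 2, 3 } };

    printf("rc=%d\n", compat_handler(ops, 3));
    return 0;
}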

