[Xen-changelog] [xen-unstable] 32-on-64: Clean up and unify compat_arg_xlat_area handling.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1213280555 -3600
# Node ID ebbd0e8c3e72b5e44e0ccc8e8203e85ba54009d3
# Parent  1b29ad98cd878d6fdc394bfc06156293b12bffd0
32-on-64: Clean up and unify compat_arg_xlat_area handling.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
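For readers skimming the diff below: the dynamically built per-vCPU translation area is replaced by a static per-CPU buffer, and access_ok() additionally accepts addresses inside that buffer. The following standalone sketch models just the new is_compat_arg_xlat_range() check from xen/include/asm-x86/x86_64/uaccess.h; the plain array and hard-coded 4096-byte page size are stand-ins for the real per-CPU machinery, not part of the patch itself.

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* stand-in for the real page size */

/* Stand-in for the per-CPU buffer declared in the patch as
 * DEFINE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]). */
static char compat_arg_xlat[PAGE_SIZE];

/* Models is_compat_arg_xlat_range(): one unsigned comparison rejects both
 * addresses below the buffer (the subtraction wraps to a huge offset) and
 * ranges that run past its end. */
static int is_compat_arg_xlat_range(const void *addr, unsigned long size)
{
    unsigned long off = (unsigned long)addr - (unsigned long)compat_arg_xlat;
    return (off | (off + size)) <= PAGE_SIZE;
}

int main(void)
{
    int local;
    printf("%d\n", is_compat_arg_xlat_range(compat_arg_xlat, 16));        /* 1 */
    printf("%d\n", is_compat_arg_xlat_range(compat_arg_xlat + 4090, 16));  /* 0: runs past the end */
    printf("%d\n", is_compat_arg_xlat_range(&local, sizeof(local)));       /* 0: stack address, outside the buffer */
    return 0;
}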
---
 xen/arch/x86/domain.c                  |  100 ---------------------------------
 xen/arch/x86/domain_build.c            |    4 -
 xen/arch/x86/hvm/hvm.c                 |   39 +++++++-----
 xen/arch/x86/mm.c                      |    6 -
 xen/arch/x86/mm/shadow/multi.c         |    9 --
 xen/arch/x86/x86_64/compat/mm.c        |    8 +-
 xen/arch/x86/x86_64/cpu_idle.c         |    2 
 xen/arch/x86/x86_64/mm.c               |    2 
 xen/common/compat/domain.c             |    2 
 xen/common/compat/grant_table.c        |    2 
 xen/common/compat/memory.c             |    2 
 xen/include/asm-x86/config.h           |    8 --
 xen/include/asm-x86/domain.h           |    1 
 xen/include/asm-x86/hvm/guest_access.h |    2 
 xen/include/asm-x86/mm.h               |    2 
 xen/include/asm-x86/percpu.h           |    2 
 xen/include/asm-x86/uaccess.h          |    4 -
 xen/include/asm-x86/x86_32/uaccess.h   |    2 
 xen/include/asm-x86/x86_64/uaccess.h   |   19 ++++--
 19 files changed, 50 insertions(+), 166 deletions(-)

diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/arch/x86/domain.c     Thu Jun 12 15:22:35 2008 +0100
@@ -165,98 +165,10 @@ void free_vcpu_struct(struct vcpu *v)
 
 #ifdef CONFIG_COMPAT
 
-int setup_arg_xlat_area(struct vcpu *v, l4_pgentry_t *l4tab)
-{
-    struct domain *d = v->domain;
-    unsigned i;
-    struct page_info *pg;
-
-    if ( !d->arch.mm_arg_xlat_l3 )
-    {
-        pg = alloc_domheap_page(NULL, 0);
-        if ( !pg )
-            return -ENOMEM;
-        d->arch.mm_arg_xlat_l3 = page_to_virt(pg);
-        clear_page(d->arch.mm_arg_xlat_l3);
-    }
-
-    l4tab[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
-        l4e_from_paddr(__pa(d->arch.mm_arg_xlat_l3), __PAGE_HYPERVISOR);
-
-    for ( i = 0; i < COMPAT_ARG_XLAT_PAGES; ++i )
-    {
-        unsigned long va = COMPAT_ARG_XLAT_VIRT_START(v->vcpu_id) + i * PAGE_SIZE;
-        l2_pgentry_t *l2tab;
-        l1_pgentry_t *l1tab;
-
-        if ( !l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]) )
-        {
-            pg = alloc_domheap_page(NULL, 0);
-            if ( !pg )
-                return -ENOMEM;
-            clear_page(page_to_virt(pg));
-            d->arch.mm_arg_xlat_l3[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
-        }
-        l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]);
-        if ( !l2e_get_intpte(l2tab[l2_table_offset(va)]) )
-        {
-            pg = alloc_domheap_page(NULL, 0);
-            if ( !pg )
-                return -ENOMEM;
-            clear_page(page_to_virt(pg));
-            l2tab[l2_table_offset(va)] = l2e_from_page(pg, __PAGE_HYPERVISOR);
-        }
-        l1tab = l2e_to_l1e(l2tab[l2_table_offset(va)]);
-        BUG_ON(l1e_get_intpte(l1tab[l1_table_offset(va)]));
-        pg = alloc_domheap_page(NULL, 0);
-        if ( !pg )
-            return -ENOMEM;
-        l1tab[l1_table_offset(va)] = l1e_from_page(pg, PAGE_HYPERVISOR);
-    }
-
-    return 0;
-}
-
-static void release_arg_xlat_area(struct domain *d)
-{
-    if ( d->arch.mm_arg_xlat_l3 )
-    {
-        unsigned l3;
-
-        for ( l3 = 0; l3 < L3_PAGETABLE_ENTRIES; ++l3 )
-        {
-            if ( l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3]) )
-            {
-                l2_pgentry_t *l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3]);
-                unsigned l2;
-
-                for ( l2 = 0; l2 < L2_PAGETABLE_ENTRIES; ++l2 )
-                {
-                    if ( l2e_get_intpte(l2tab[l2]) )
-                    {
-                        l1_pgentry_t *l1tab = l2e_to_l1e(l2tab[l2]);
-                        unsigned l1;
-
-                        for ( l1 = 0; l1 < L1_PAGETABLE_ENTRIES; ++l1 )
-                        {
-                            if ( l1e_get_intpte(l1tab[l1]) )
-                                free_domheap_page(l1e_get_page(l1tab[l1]));
-                        }
-                        free_domheap_page(l2e_get_page(l2tab[l2]));
-                    }
-                }
-                free_domheap_page(l3e_get_page(d->arch.mm_arg_xlat_l3[l3]));
-            }
-        }
-        free_domheap_page(virt_to_page(d->arch.mm_arg_xlat_l3));
-    }
-}
-
 static int setup_compat_l4(struct vcpu *v)
 {
     struct page_info *pg = alloc_domheap_page(NULL, 0);
     l4_pgentry_t *l4tab;
-    int rc;
 
     if ( pg == NULL )
         return -ENOMEM;
@@ -272,12 +184,6 @@ static int setup_compat_l4(struct vcpu *
         l4e_from_paddr(__pa(v->domain->arch.mm_perdomain_l3),
                        __PAGE_HYPERVISOR);
 
-    if ( (rc = setup_arg_xlat_area(v, l4tab)) < 0 )
-    {
-        free_domheap_page(pg);
-        return rc;
-    }
-
     v->arch.guest_table = pagetable_from_page(pg);
     v->arch.guest_table_user = v->arch.guest_table;
 
@@ -309,7 +215,6 @@ int switch_native(struct domain *d)
         return 0;
 
     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-    release_arg_xlat_area(d);
 
     /* switch gdt */
     gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
@@ -359,7 +264,6 @@ int switch_compat(struct domain *d)
 
  undo_and_fail:
     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-    release_arg_xlat_area(d);
     gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
     while ( vcpuid-- != 0 )
     {
@@ -372,7 +276,6 @@ int switch_compat(struct domain *d)
 }
 
 #else
-#define release_arg_xlat_area(d) ((void)0)
 #define setup_compat_l4(v) 0
 #define release_compat_l4(v) ((void)0)
 #endif
@@ -584,9 +487,6 @@ void arch_domain_destroy(struct domain *
     free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
     free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
 #endif
-
-    if ( is_pv_32on64_domain(d) )
-        release_arg_xlat_area(d);
 
     free_xenheap_page(d->shared_info);
 }
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/arch/x86/domain_build.c       Thu Jun 12 15:22:35 2008 +0100
@@ -592,11 +592,7 @@ int __init construct_dom0(
         l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     if ( is_pv_32on64_domain(d) )
-    {
         v->arch.guest_table_user = v->arch.guest_table;
-        if ( setup_arg_xlat_area(v, l4start) < 0 )
-            panic("Not enough RAM for domain 0 hypercall argument 
translation.\n");
-    }
 
     l4tab += l4_table_offset(v_start);
     mfn = alloc_spfn;
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jun 12 15:22:35 2008 +0100
@@ -1571,17 +1571,21 @@ enum hvm_copy_result hvm_fetch_from_gues
                       PFEC_page_present | pfec);
 }
 
-DEFINE_PER_CPU(int, guest_handles_in_xen_space);
-
-unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
+#ifdef __x86_64__
+DEFINE_PER_CPU(bool_t, hvm_64bit_hcall);
+#endif
+
+unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
 {
     int rc;
 
-    if ( this_cpu(guest_handles_in_xen_space) )
+#ifdef __x86_64__
+    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(to, len) )
     {
         memcpy(to, from, len);
         return 0;
     }
+#endif
 
     rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                         len, 0);
@@ -1592,11 +1596,13 @@ unsigned long copy_from_user_hvm(void *t
 {
     int rc;
 
-    if ( this_cpu(guest_handles_in_xen_space) )
+#ifdef __x86_64__
+    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(from, len) )
     {
         memcpy(to, from, len);
         return 0;
     }
+#endif
 
     rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
     return rc ? len : 0; /* fake a copy_from_user() return code */
@@ -1878,20 +1884,17 @@ static long hvm_memory_op_compat32(int c
             uint32_t idx;
             uint32_t gpfn;
         } u;
-        struct xen_add_to_physmap h;
+        struct xen_add_to_physmap *h = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
 
         if ( copy_from_guest(&u, arg, 1) )
             return -EFAULT;
 
-        h.domid = u.domid;
-        h.space = u.space;
-        h.idx = u.idx;
-        h.gpfn = u.gpfn;
-
-        this_cpu(guest_handles_in_xen_space) = 1;
-        rc = hvm_memory_op(cmd, guest_handle_from_ptr(&h, void));
-        this_cpu(guest_handles_in_xen_space) = 0;
-
+        h->domid = u.domid;
+        h->space = u.space;
+        h->idx = u.idx;
+        h->gpfn = u.gpfn;
+
+        rc = hvm_memory_op(cmd, guest_handle_from_ptr(h, void));
         break;
     }
 
@@ -1934,7 +1937,7 @@ int hvm_do_hypercall(struct cpu_user_reg
     switch ( mode )
     {
 #ifdef __x86_64__
-    case 8:
+    case 8:        
 #endif
     case 4:
     case 2:
@@ -1963,11 +1966,13 @@ int hvm_do_hypercall(struct cpu_user_reg
         HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx)", eax,
                     regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8);
 
+        this_cpu(hvm_64bit_hcall) = 1;
         regs->rax = hvm_hypercall64_table[eax](regs->rdi,
                                                regs->rsi,
                                                regs->rdx,
                                                regs->r10,
-                                               regs->r8);
+                                               regs->r8); 
+        this_cpu(hvm_64bit_hcall) = 0;
     }
     else
 #endif
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/arch/x86/mm.c Thu Jun 12 15:22:35 2008 +0100
@@ -1253,10 +1253,6 @@ static int alloc_l4_table(struct page_in
     pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
                       __PAGE_HYPERVISOR);
-    if ( is_pv_32on64_domain(d) )
-        pl4e[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
-            l4e_from_page(virt_to_page(d->arch.mm_arg_xlat_l3),
-                          __PAGE_HYPERVISOR);
 
     return 1;
 
@@ -3008,7 +3004,7 @@ int do_update_va_mapping(unsigned long v
 
     perfc_incr(calls_to_update_va);
 
-    if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) )
+    if ( unlikely(!access_ok(va, 1) && !paging_mode_external(d)) )
         return -EINVAL;
 
     rc = xsm_update_va_mapping(current->domain, val);
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Jun 12 15:22:35 2008 +0100
@@ -1629,15 +1629,6 @@ void sh_install_xen_entries_in_l4(struct
         sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
             shadow_l4e_from_mfn(pagetable_get_mfn(d->arch.phys_table),
                                 __PAGE_HYPERVISOR);
-    }
-
-    if ( is_pv_32on64_domain(v->domain) )
-    {
-        /* install compat arg xlat entry */
-        sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
-            shadow_l4e_from_mfn(
-                    page_to_mfn(virt_to_page(d->arch.mm_arg_xlat_l3)),
-                    __PAGE_HYPERVISOR);
     }
 
     sh_unmap_domain_page(sl4e);    
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/arch/x86/x86_64/compat/mm.c
--- a/xen/arch/x86/x86_64/compat/mm.c   Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/arch/x86/x86_64/compat/mm.c   Thu Jun 12 15:22:35 2008 +0100
@@ -58,7 +58,7 @@ int compat_arch_memory_op(int op, XEN_GU
     case XENMEM_add_to_physmap:
     {
         struct compat_add_to_physmap cmp;
-        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
 
         if ( copy_from_guest(&cmp, arg, 1) )
             return -EFAULT;
@@ -72,7 +72,7 @@ int compat_arch_memory_op(int op, XEN_GU
     case XENMEM_set_memory_map:
     {
         struct compat_foreign_memory_map cmp;
-        struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+        struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
 
         if ( copy_from_guest(&cmp, arg, 1) )
             return -EFAULT;
@@ -91,7 +91,7 @@ int compat_arch_memory_op(int op, XEN_GU
     case XENMEM_machine_memory_map:
     {
         struct compat_memory_map cmp;
-        struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+        struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
 
         if ( copy_from_guest(&cmp, arg, 1) )
             return -EFAULT;
@@ -189,7 +189,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
     if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
         return -EFAULT;
 
-    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
 
     for ( ; count; count -= i )
     {
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/arch/x86/x86_64/cpu_idle.c
--- a/xen/arch/x86/x86_64/cpu_idle.c    Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/arch/x86/x86_64/cpu_idle.c    Thu Jun 12 15:22:35 2008 +0100
@@ -35,7 +35,7 @@ DEFINE_XEN_GUEST_HANDLE(compat_processor
 DEFINE_XEN_GUEST_HANDLE(compat_processor_csd_t);
 DEFINE_XEN_GUEST_HANDLE(compat_processor_cx_t);
 
-#define xlat_page_start COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id)
+#define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE)
 #define xlat_page_size  COMPAT_ARG_XLAT_SIZE
 #define xlat_page_left_size(xlat_page_current) \
     (xlat_page_start + xlat_page_size - xlat_page_current)
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Thu Jun 12 15:22:35 2008 +0100
@@ -35,6 +35,8 @@
 #ifdef CONFIG_COMPAT
 unsigned int m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
 #endif
+
+DEFINE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
 
 /* Top-level master (and idle-domain) page directory. */
 l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/common/compat/domain.c
--- a/xen/common/compat/domain.c        Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/common/compat/domain.c        Thu Jun 12 15:22:35 2008 +0100
@@ -87,7 +87,7 @@ int compat_vcpu_op(int cmd, int vcpuid, 
 
         if ( copy_from_guest(&cmp, arg, 1) )
             return -EFAULT;
-        nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
+        nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
         XLAT_vcpu_set_singleshot_timer(nat, &cmp);
         rc = do_vcpu_op(cmd, vcpuid, guest_handle_from_ptr(nat, void));
         break;
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/common/compat/grant_table.c
--- a/xen/common/compat/grant_table.c   Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/common/compat/grant_table.c   Thu Jun 12 15:22:35 2008 +0100
@@ -97,7 +97,7 @@ int compat_grant_table_op(unsigned int c
             struct compat_gnttab_copy copy;
         } cmp;
 
-        set_xen_guest_handle(nat.uop, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+        set_xen_guest_handle(nat.uop, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
         switch ( cmd )
         {
         case GNTTABOP_setup_table:
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/common/compat/memory.c
--- a/xen/common/compat/memory.c        Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/common/compat/memory.c        Thu Jun 12 15:22:35 2008 +0100
@@ -27,7 +27,7 @@ int compat_memory_op(unsigned int cmd, X
             struct compat_translate_gpfn_list xlat;
         } cmp;
 
-        set_xen_guest_handle(nat.hnd, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
+        set_xen_guest_handle(nat.hnd, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
         split = 0;
         switch ( op )
         {
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h      Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/include/asm-x86/config.h      Thu Jun 12 15:22:35 2008 +0100
@@ -249,14 +249,6 @@ extern unsigned int video_mode, video_fl
 
 #endif
 
-#define COMPAT_ARG_XLAT_VIRT_BASE      (1UL << ROOT_PAGETABLE_SHIFT)
-#define COMPAT_ARG_XLAT_SHIFT          0
-#define COMPAT_ARG_XLAT_PAGES          (1U << COMPAT_ARG_XLAT_SHIFT)
-#define COMPAT_ARG_XLAT_SIZE           (COMPAT_ARG_XLAT_PAGES << PAGE_SHIFT)
-#define COMPAT_ARG_XLAT_VIRT_START(vcpu_id) \
-    (COMPAT_ARG_XLAT_VIRT_BASE + ((unsigned long)(vcpu_id) << \
-                                  (PAGE_SHIFT + COMPAT_ARG_XLAT_SHIFT + 1)))
-
 #define PGT_base_page_table     PGT_l4_page_table
 
 #define __HYPERVISOR_CS64 0xe008
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/include/asm-x86/domain.h      Thu Jun 12 15:22:35 2008 +0100
@@ -208,7 +208,6 @@ struct arch_domain
 
 #ifdef CONFIG_COMPAT
     unsigned int hv_compat_vstart;
-    l3_pgentry_t *mm_arg_xlat_l3;
 #endif
 
     /* I/O-port admin-specified access capabilities. */
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/include/asm-x86/hvm/guest_access.h
--- a/xen/include/asm-x86/hvm/guest_access.h    Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/include/asm-x86/hvm/guest_access.h    Thu Jun 12 15:22:35 2008 +0100
@@ -2,7 +2,7 @@
 #define __ASM_X86_HVM_GUEST_ACCESS_H__
 
 #include <xen/percpu.h>
-DECLARE_PER_CPU(int, guest_handles_in_xen_space);
+DECLARE_PER_CPU(bool_t, hvm_64bit_hcall);
 
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len);
 unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len);
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/include/asm-x86/mm.h  Thu Jun 12 15:22:35 2008 +0100
@@ -342,10 +342,8 @@ int map_ldt_shadow_page(unsigned int);
 int map_ldt_shadow_page(unsigned int);
 
 #ifdef CONFIG_COMPAT
-int setup_arg_xlat_area(struct vcpu *, l4_pgentry_t *);
 unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
 #else
-# define setup_arg_xlat_area(vcpu, l4tab) 0
 # define domain_clamp_alloc_bitsize(d, b) (b)
 #endif
 
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/include/asm-x86/percpu.h
--- a/xen/include/asm-x86/percpu.h      Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/include/asm-x86/percpu.h      Thu Jun 12 15:22:35 2008 +0100
@@ -1,7 +1,7 @@
 #ifndef __X86_PERCPU_H__
 #define __X86_PERCPU_H__
 
-#define PERCPU_SHIFT 12
+#define PERCPU_SHIFT 13
 #define PERCPU_SIZE  (1UL << PERCPU_SHIFT)
 
 /* Separate out the type, so (int[3], foo) works. */
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/include/asm-x86/uaccess.h
--- a/xen/include/asm-x86/uaccess.h     Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/include/asm-x86/uaccess.h     Thu Jun 12 15:22:35 2008 +0100
@@ -118,7 +118,7 @@ extern void __put_user_bad(void);
 ({                                                                     \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
-       if (__addr_ok(__pu_addr))                                       \
+       if (access_ok(__pu_addr,size))                                  \
                __put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
        __pu_err;                                                       \
 })                                                     
@@ -135,7 +135,7 @@ extern void __put_user_bad(void);
        long __gu_err;                                          \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        __get_user_size((x),__gu_addr,(size),__gu_err,-EFAULT); \
-       if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;          \
+       if (!access_ok(__gu_addr,size)) __gu_err = -EFAULT;     \
        __gu_err;                                               \
 })                                                     
 
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/include/asm-x86/x86_32/uaccess.h
--- a/xen/include/asm-x86/x86_32/uaccess.h      Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/include/asm-x86/x86_32/uaccess.h      Thu Jun 12 15:22:35 2008 +0100
@@ -1,7 +1,5 @@
 #ifndef __i386_UACCESS_H
 #define __i386_UACCESS_H
-
-#define __addr_ok(addr) ((unsigned long)(addr) < HYPERVISOR_VIRT_START)
 
 /*
  * Test whether a block of memory is a valid user space address.
diff -r 1b29ad98cd87 -r ebbd0e8c3e72 xen/include/asm-x86/x86_64/uaccess.h
--- a/xen/include/asm-x86/x86_64/uaccess.h      Thu Jun 12 09:24:35 2008 +0100
+++ b/xen/include/asm-x86/x86_64/uaccess.h      Thu Jun 12 15:22:35 2008 +0100
@@ -1,5 +1,14 @@
 #ifndef __X86_64_UACCESS_H
 #define __X86_64_UACCESS_H
+
+#define COMPAT_ARG_XLAT_VIRT_BASE this_cpu(compat_arg_xlat)
+#define COMPAT_ARG_XLAT_SIZE      PAGE_SIZE
+DECLARE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
+#define is_compat_arg_xlat_range(addr, size) ({                               \
+    unsigned long __off;                                                      \
+    __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
+    (__off | (__off + (unsigned long)(size))) <= PAGE_SIZE;                   \
+})
 
 /*
  * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
@@ -11,11 +20,11 @@
     (((unsigned long)(addr) < (1UL<<48)) || \
      ((unsigned long)(addr) >= HYPERVISOR_VIRT_END))
 
-#define access_ok(addr, size) (__addr_ok(addr))
+#define access_ok(addr, size) \
+    (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))
 
-#define array_access_ok(addr, count, size) (__addr_ok(addr))
-
-#ifdef CONFIG_COMPAT
+#define array_access_ok(addr, count, size) \
+    (access_ok(addr, (count)*(size)))
 
 #define __compat_addr_ok(addr) \
     ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(current->domain))
@@ -26,8 +35,6 @@
 #define compat_array_access_ok(addr,count,size) \
     (likely((count) < (~0U / (size))) && \
      compat_access_ok(addr, (count) * (size)))
-
-#endif
 
 #define __put_user_size(x,ptr,size,retval,errret)                      \
 do {                                                                   \

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

