[Xen-devel] [PATCH RFC 28/44] x86/xlat: Use the percpu compat translation area

This allows {setup,free}_compat_arg_xlat() to be dropped.

Changing COMPAT_ARG_XLAT_VIRT_BASE to avoid referencing current has a fairly
large impact on code size, as the macro is hidden underneath the
copy_{to,from}_guest() logic and is therefore expanded at many call sites.
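
For reference, the old and new definitions side by side; PERCPU_XLAT_START
is assumed here to be the fixed per-cpu virtual address introduced earlier
in this series:

    /* Before: per-vCPU area, so every use has to read current. */
    #define ARG_XLAT_START(v) \
        (ARG_XLAT_VIRT_START + ((v)->vcpu_id << ARG_XLAT_VA_SHIFT))
    #define COMPAT_ARG_XLAT_VIRT_BASE ((void *)ARG_XLAT_START(current))

    /* After: fixed per-cpu address (assumed to come from the earlier
     * per-cpu mapping patch); no read of current needed. */
    #define COMPAT_ARG_XLAT_VIRT_BASE ((void *)PERCPU_XLAT_START)

Each copy_{to,from}_guest() expansion therefore drops the read of current
and the per-vCPU offset calculation, which is where the savings below come
from.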

The net bloat-o-meter report for this change is:

  add/remove: 0/2 grow/shrink: 4/35 up/down: 570/-1285 (-715)

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c               |  8 --------
 xen/arch/x86/pv/dom0_build.c         |  2 --
 xen/arch/x86/pv/domain.c             | 12 +-----------
 xen/arch/x86/x86_64/mm.c             | 13 -------------
 xen/include/asm-x86/config.h         |  7 -------
 xen/include/asm-x86/x86_64/uaccess.h |  6 ++----
 6 files changed, 3 insertions(+), 45 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 71fddfd..5836269 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1533,10 +1533,6 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     v->arch.hvm_vcpu.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
 
-    rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
-    if ( rc != 0 )
-        goto fail4;
-
     if ( nestedhvm_enabled(d)
         && (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
         goto fail5;
@@ -1562,8 +1558,6 @@ int hvm_vcpu_initialise(struct vcpu *v)
  fail6:
     nestedhvm_vcpu_destroy(v);
  fail5:
-    free_compat_arg_xlat(v);
- fail4:
     hvm_funcs.vcpu_destroy(v);
  fail3:
     vlapic_destroy(v);
@@ -1584,8 +1578,6 @@ void hvm_vcpu_destroy(struct vcpu *v)
 
     nestedhvm_vcpu_destroy(v);
 
-    free_compat_arg_xlat(v);
-
     tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
     hvm_funcs.vcpu_destroy(v);
 
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 3baf37b..3f5e3bc 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -385,8 +385,6 @@ int __init dom0_construct_pv(struct domain *d,
     {
         d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
         v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0];
-        if ( setup_compat_arg_xlat(v) != 0 )
-            BUG();
     }
 
     nr_pages = dom0_compute_nr_pages(d, &parms, initrd_len);
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 7e4566d..4e88bfd 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -72,8 +72,7 @@ int switch_compat(struct domain *d)
 
     for_each_vcpu( d, v )
     {
-        if ( (rc = setup_compat_arg_xlat(v)) ||
-             (rc = setup_compat_l4(v)) )
+        if ( (rc = setup_compat_l4(v)) )
             goto undo_and_fail;
     }
 
@@ -87,10 +86,7 @@ int switch_compat(struct domain *d)
  undo_and_fail:
     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
     for_each_vcpu( d, v )
-    {
-        free_compat_arg_xlat(v);
         release_compat_l4(v);
-    }
 
     return rc;
 }
@@ -112,10 +108,7 @@ static void pv_destroy_gdt_ldt_l1tab(struct vcpu *v)
 void pv_vcpu_destroy(struct vcpu *v)
 {
     if ( is_pv_32bit_vcpu(v) )
-    {
-        free_compat_arg_xlat(v);
         release_compat_l4(v);
-    }
 
     pv_destroy_gdt_ldt_l1tab(v);
     xfree(v->arch.pv_vcpu.trap_ctxt);
@@ -152,9 +145,6 @@ int pv_vcpu_initialise(struct vcpu *v)
 
     if ( is_pv_32bit_domain(d) )
     {
-        if ( (rc = setup_compat_arg_xlat(v)) )
-            goto done;
-
         if ( (rc = setup_compat_l4(v)) )
             goto done;
     }
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 68eee30..aae721b 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -697,19 +697,6 @@ void __init zap_low_mappings(void)
                      __PAGE_HYPERVISOR);
 }
 
-int setup_compat_arg_xlat(struct vcpu *v)
-{
-    return create_perdomain_mapping(v->domain, ARG_XLAT_START(v),
-                                    PFN_UP(COMPAT_ARG_XLAT_SIZE),
-                                    NULL, NIL(struct page_info *));
-}
-
-void free_compat_arg_xlat(struct vcpu *v)
-{
-    destroy_perdomain_mapping(v->domain, ARG_XLAT_START(v),
-                              PFN_UP(COMPAT_ARG_XLAT_SIZE));
-}
-
 static void cleanup_frame_table(struct mem_hotadd_info *info)
 {
     unsigned long sva, eva;
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 3d64047..c7503ad 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -316,13 +316,6 @@ extern unsigned long xen_phys_start;
 #define LDT_VIRT_START(v)    \
     (GDT_VIRT_START(v) + (64*1024))
 
-/* Argument translation area. The third per-domain-mapping sub-area. */
-#define ARG_XLAT_VIRT_START      PERDOMAIN_VIRT_SLOT(2)
-/* Allow for at least one guard page (COMPAT_ARG_XLAT_SIZE being 2 pages): */
-#define ARG_XLAT_VA_SHIFT        (2 + PAGE_SHIFT)
-#define ARG_XLAT_START(v)        \
-    (ARG_XLAT_VIRT_START + ((v)->vcpu_id << ARG_XLAT_VA_SHIFT))
-
 #define NATIVE_VM_ASSIST_VALID   ((1UL << VMASST_TYPE_4gb_segments)        | \
                                   (1UL << VMASST_TYPE_4gb_segments_notify) | \
                                   (1UL << VMASST_TYPE_writable_pagetables) | \
diff --git a/xen/include/asm-x86/x86_64/uaccess.h b/xen/include/asm-x86/x86_64/uaccess.h
index d7dad4f..ce88dce 100644
--- a/xen/include/asm-x86/x86_64/uaccess.h
+++ b/xen/include/asm-x86/x86_64/uaccess.h
@@ -1,11 +1,9 @@
 #ifndef __X86_64_UACCESS_H
 #define __X86_64_UACCESS_H
 
-#define COMPAT_ARG_XLAT_VIRT_BASE ((void *)ARG_XLAT_START(current))
+#define COMPAT_ARG_XLAT_VIRT_BASE ((void *)PERCPU_XLAT_START)
 #define COMPAT_ARG_XLAT_SIZE      (2*PAGE_SIZE)
-struct vcpu;
-int setup_compat_arg_xlat(struct vcpu *v);
-void free_compat_arg_xlat(struct vcpu *v);
+
 #define is_compat_arg_xlat_range(addr, size) ({                               \
     unsigned long __off;                                                      \
     __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
-- 
2.1.4

