[Xen-devel] [PATCH v2 3/3] x86 / vmx: use a 'normal' domheap page for APIC_DEFAULT_PHYS_BASE



vmx_alloc_vlapic_mapping() currently contains some very odd-looking code
that allocates a MEMF_no_owner domheap page and then shares it with the
guest as if it were a xenheap page. This then requires
vmx_free_vlapic_mapping() to call a special function in the mm code:
free_shared_domheap_page().
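
For reference, the odd pattern being removed is essentially the following
(condensed from the hunks below; error handling elided):

    pg = alloc_domheap_page(d, MEMF_no_owner);  /* page has no owner... */
    clear_domain_page(page_to_mfn(pg));
    share_xen_page_with_guest(pg, d, SHARE_rw); /* ...yet shared as if xenheap */
    ...
    /* tear-down then needs a special helper in the mm code */
    free_shared_domheap_page(mfn_to_page(mfn));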

By using a 'normal' domheap page (i.e. by not passing MEMF_no_owner to
alloc_domheap_page()), the odd-looking code in vmx_alloc_vlapic_mapping()
can simply use get_page_and_type() to set up a writable mapping before
insertion in the P2M, and vmx_free_vlapic_mapping() can release the page
using put_page_alloc_ref() followed by put_page_and_type(). This then
allows free_shared_domheap_page() to be purged.
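
The resulting pairing is then entirely conventional (again condensed from
the hunks below):

    /* allocation: the page is owned by, and counted against, d */
    pg = alloc_domheap_page(d, 0);
    if ( !get_page_and_type(pg, d, PGT_writable_page) )
        domain_crash(d); /* nothing else can hold a reference yet */
    ...
    /* tear-down: drop the allocation and writable type references */
    put_page_alloc_ref(pg);
    put_page_and_type(pg);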

There is, however, some fall-out from this simplification:

- alloc_domheap_page() will now call assign_pages() and run into the fact
  that 'max_pages' is not set until some time after domain_create() (see
  the sketch after this list). To avoid an allocation failure,
  domain_create() is modified to set max_pages to an initial value,
  sufficient to cover any domheap allocations required to complete domain
  creation. The value will be set to the 'real' max_pages when the
  tool-stack later performs the XEN_DOMCTL_max_mem operation, thus
  allowing the rest of the domain's memory to be allocated.

- Because the domheap page is no longer a pseudo-xenheap page, the
  reference counting will prevent the domain from being destroyed. Thus
  the call to vmx_free_vlapic_mapping() is moved from the
  domain_destroy() method into the domain_relinquish_resources() method.
  Whilst in the area, make the domain_destroy() method optional and
  invoke it via alternative_vcall() (since it no longer performs any
  function in VMX and is stubbed in SVM anyway).
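
For context, the check in assign_pages() that motivates the max_pages
change looks roughly like this (a simplified sketch, not the verbatim
code):

    /* Simplified sketch of the over-allocation check in assign_pages(). */
    if ( unlikely(d->tot_pages + (1u << order) > d->max_pages) )
    {
        /* Refuse: the domain would exceed its page limit. */
        rc = -E2BIG;
        goto out;
    }

With max_pages left at zero until XEN_DOMCTL_max_mem, even the single-page
vlapic allocation during domain creation would trip this check, hence
DOMAIN_INIT_PAGES.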

Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wl@xxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Julien Grall <julien@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>

v2:
 - Set an initial value for max_pages rather than avoiding the check in
   assign_pages()
 - Make domain_destroy() optional
---
 xen/arch/x86/hvm/hvm.c     |  4 +++-
 xen/arch/x86/hvm/svm/svm.c |  5 -----
 xen/arch/x86/hvm/vmx/vmx.c | 25 ++++++++++++++++++++-----
 xen/arch/x86/mm.c          | 10 ----------
 xen/common/domain.c        |  8 ++++++++
 xen/include/asm-x86/mm.h   |  2 --
 6 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e51c077269..d2610f5f01 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -746,7 +746,9 @@ void hvm_domain_destroy(struct domain *d)
 
     hvm_destroy_cacheattr_region_list(d);
 
-    hvm_funcs.domain_destroy(d);
+    if ( hvm_funcs.domain_destroy )
+        alternative_vcall(hvm_funcs.domain_destroy, d);
+
     rtc_deinit(d);
     stdvga_deinit(d);
     vioapic_deinit(d);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b1c376d455..b7f67f9f03 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1155,10 +1155,6 @@ static int svm_domain_initialise(struct domain *d)
     return 0;
 }
 
-static void svm_domain_destroy(struct domain *d)
-{
-}
-
 static int svm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
@@ -2425,7 +2421,6 @@ static struct hvm_function_table __initdata svm_function_table = {
     .cpu_up               = svm_cpu_up,
     .cpu_down             = svm_cpu_down,
     .domain_initialise    = svm_domain_initialise,
-    .domain_destroy       = svm_domain_destroy,
     .vcpu_initialise      = svm_vcpu_initialise,
     .vcpu_destroy         = svm_vcpu_destroy,
     .save_cpu_ctxt        = svm_save_vmcb_ctxt,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 8706954d73..f76fdd4f96 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -420,7 +420,7 @@ static int vmx_domain_initialise(struct domain *d)
     return 0;
 }
 
-static void vmx_domain_destroy(struct domain *d)
+static void vmx_domain_relinquish_resources(struct domain *d)
 {
     if ( !has_vlapic(d) )
         return;
@@ -2241,7 +2241,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .cpu_up_prepare       = vmx_cpu_up_prepare,
     .cpu_dead             = vmx_cpu_dead,
     .domain_initialise    = vmx_domain_initialise,
-    .domain_destroy       = vmx_domain_destroy,
+    .domain_relinquish_resources = vmx_domain_relinquish_resources,
     .vcpu_initialise      = vmx_vcpu_initialise,
     .vcpu_destroy         = vmx_vcpu_destroy,
     .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
@@ -3029,12 +3029,22 @@ static int vmx_alloc_vlapic_mapping(struct domain *d)
     if ( !cpu_has_vmx_virtualize_apic_accesses )
         return 0;
 
-    pg = alloc_domheap_page(d, MEMF_no_owner);
+    pg = alloc_domheap_page(d, 0);
     if ( !pg )
         return -ENOMEM;
+
+    if ( !get_page_and_type(pg, d, PGT_writable_page) )
+    {
+        /*
+         * The domain can't possibly know about this page yet, so failure
+         * here is a clear indication of something fishy going on.
+         */
+        domain_crash(d);
+        return -ENODATA;
+    }
+
     mfn = page_to_mfn(pg);
     clear_domain_page(mfn);
-    share_xen_page_with_guest(pg, d, SHARE_rw);
     d->arch.hvm.vmx.apic_access_mfn = mfn;
 
     return set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), mfn,
@@ -3048,7 +3058,12 @@ static void vmx_free_vlapic_mapping(struct domain *d)
 
     d->arch.hvm.vmx.apic_access_mfn = INVALID_MFN;
     if ( !mfn_eq(mfn, INVALID_MFN) )
-        free_shared_domheap_page(mfn_to_page(mfn));
+    {
+        struct page_info *pg = mfn_to_page(mfn);
+
+        put_page_alloc_ref(pg);
+        put_page_and_type(pg);
+    }
 }
 
 static void vmx_install_vlapic_mapping(struct vcpu *v)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 654190e9e9..2a6d2e8af9 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -496,16 +496,6 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
     spin_unlock(&d->page_alloc_lock);
 }
 
-void free_shared_domheap_page(struct page_info *page)
-{
-    put_page_alloc_ref(page);
-    if ( !test_and_clear_bit(_PGC_xen_heap, &page->count_info) )
-        ASSERT_UNREACHABLE();
-    page->u.inuse.type_info = 0;
-    page_set_owner(page, NULL);
-    free_domheap_page(page);
-}
-
 void make_cr3(struct vcpu *v, mfn_t mfn)
 {
     struct domain *d = v->domain;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index ee3f9ffd3e..30c777acb8 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -339,6 +339,8 @@ static int sanitise_domain_config(struct xen_domctl_createdomain *config)
     return arch_sanitise_domain_config(config);
 }
 
+#define DOMAIN_INIT_PAGES 1
+
 struct domain *domain_create(domid_t domid,
                              struct xen_domctl_createdomain *config,
                              bool is_priv)
@@ -441,6 +443,12 @@ struct domain *domain_create(domid_t domid,
         radix_tree_init(&d->pirq_tree);
     }
 
+    /*
+     * Allow a limited number of special pages to be allocated for the
+     * domain
+     */
+    d->max_pages = DOMAIN_INIT_PAGES;
+
     if ( (err = arch_domain_create(d, config)) != 0 )
         goto fail;
     init_status |= INIT_arch;
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 2ca8882ad0..e429f38228 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -317,8 +317,6 @@ struct page_info
 
 #define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
 
-extern void free_shared_domheap_page(struct page_info *page);
-
 #define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
 extern unsigned long max_page;
 extern unsigned long total_pages;
-- 
2.20.1

