
[Xen-changelog] [xen-unstable] x86: properly use map_domain_page() in nested HVM code


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Mon, 04 Feb 2013 15:44:13 +0000
  • Delivery-date: Mon, 04 Feb 2013 15:44:21 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1358946774 -3600
# Node ID 0c845035d84c5231719dd2bc1ace2c0483f4a5b6
# Parent  29037a487b2abbca2034a1212d990ae91f372bc8
x86: properly use map_domain_page() in nested HVM code

This eliminates a couple of incorrect/inconsistent uses of
map_domain_page() from VT-x code.

Note that this does _not_ add error handling where none was present
before, even though I think NULL returns from any of the mapping
operations touched here need to be handled properly. I just don't know
this code well enough to determine the right action for each case.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---
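
The new boolean distinguishes the two kinds of mappings the domain-page
interface provides: a transient mapping (permanent == 0) comes from
map_domain_page() and is valid only on the current CPU until it is
unmapped, while a permanent one (permanent == 1) comes from
map_domain_page_global() and may be cached (e.g. in struct nestedvcpu)
and used later from any CPU. Both flavours hold a page reference that
hvm_unmap_guest_frame() drops. A minimal caller sketch follows; this is
hypothetical code illustrating the reworked API, not part of the patch:

    /* Transient: map, use, and unmap on the same CPU. */
    void *p = hvm_map_guest_frame_ro(gfn, 0);
    if ( p != NULL )
    {
        ioport_80 = test_bit(0x80, p);
        hvm_unmap_guest_frame(p, 0);
    }

    /* Permanent: may be stashed and used across VM entries and CPUs.
     * The matching unmap must also pass permanent == 1 so that
     * unmap_domain_page_global() is used before the page reference is
     * dropped. */
    nv->nv_vvmcx = hvm_map_guest_frame_rw(vmcbaddr >> PAGE_SHIFT, 1);
    if ( nv->nv_vvmcx != NULL )
        nv->nv_vvmcxaddr = vmcbaddr;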


diff -r 29037a487b2a -r 0c845035d84c xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Jan 23 14:11:39 2013 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Jan 23 14:12:54 2013 +0100
@@ -1966,7 +1966,8 @@ int hvm_virtual_to_linear_addr(
 
 /* On non-NULL return, we leave this function holding an additional 
  * ref on the underlying mfn, if any */
-static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
+static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable,
+                                   bool_t permanent)
 {
     void *map;
     p2m_type_t p2mt;
@@ -1991,28 +1992,41 @@ static void *__hvm_map_guest_frame(unsig
     if ( writable )
         paging_mark_dirty(d, page_to_mfn(page));
 
-    map = __map_domain_page(page);
+    if ( !permanent )
+        return __map_domain_page(page);
+
+    map = __map_domain_page_global(page);
+    if ( !map )
+        put_page(page);
+
     return map;
 }
 
-void *hvm_map_guest_frame_rw(unsigned long gfn)
+void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent)
 {
-    return __hvm_map_guest_frame(gfn, 1);
+    return __hvm_map_guest_frame(gfn, 1, permanent);
 }
 
-void *hvm_map_guest_frame_ro(unsigned long gfn)
+void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
 {
-    return __hvm_map_guest_frame(gfn, 0);
+    return __hvm_map_guest_frame(gfn, 0, permanent);
 }
 
-void hvm_unmap_guest_frame(void *p)
+void hvm_unmap_guest_frame(void *p, bool_t permanent)
 {
-    if ( p )
-    {
-        unsigned long mfn = domain_page_map_to_mfn(p);
+    unsigned long mfn;
+
+    if ( !p )
+        return;
+
+    mfn = domain_page_map_to_mfn(p);
+
+    if ( !permanent )
         unmap_domain_page(p);
-        put_page(mfn_to_page(mfn));
-    }
+    else
+        unmap_domain_page_global(p);
+
+    put_page(mfn_to_page(mfn));
 }
 
 static void *hvm_map_entry(unsigned long va)
@@ -2038,7 +2052,7 @@ static void *hvm_map_entry(unsigned long
     if ( (pfec == PFEC_page_paged) || (pfec == PFEC_page_shared) )
         goto fail;
 
-    v = hvm_map_guest_frame_rw(gfn);
+    v = hvm_map_guest_frame_rw(gfn, 0);
     if ( v == NULL )
         goto fail;
 
@@ -2051,7 +2065,7 @@ static void *hvm_map_entry(unsigned long
 
 static void hvm_unmap_entry(void *p)
 {
-    hvm_unmap_guest_frame(p);
+    hvm_unmap_guest_frame(p, 0);
 }
 
 static int hvm_load_segment_selector(
diff -r 29037a487b2a -r 0c845035d84c xen/arch/x86/hvm/nestedhvm.c
--- a/xen/arch/x86/hvm/nestedhvm.c      Wed Jan 23 14:11:39 2013 +0100
+++ b/xen/arch/x86/hvm/nestedhvm.c      Wed Jan 23 14:12:54 2013 +0100
@@ -53,8 +53,7 @@ nestedhvm_vcpu_reset(struct vcpu *v)
     nv->nv_ioport80 = 0;
     nv->nv_ioportED = 0;
 
-    if (nv->nv_vvmcx)
-        hvm_unmap_guest_frame(nv->nv_vvmcx);
+    hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
     nv->nv_vvmcx = NULL;
     nv->nv_vvmcxaddr = VMCX_EADDR;
     nv->nv_flushp2m = 0;
diff -r 29037a487b2a -r 0c845035d84c xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c  Wed Jan 23 14:11:39 2013 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c  Wed Jan 23 14:12:54 2013 +0100
@@ -69,15 +69,14 @@ int nestedsvm_vmcb_map(struct vcpu *v, u
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
 
     if (nv->nv_vvmcx != NULL && nv->nv_vvmcxaddr != vmcbaddr) {
-        ASSERT(nv->nv_vvmcx != NULL);
         ASSERT(nv->nv_vvmcxaddr != VMCX_EADDR);
-        hvm_unmap_guest_frame(nv->nv_vvmcx);
+        hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
         nv->nv_vvmcx = NULL;
         nv->nv_vvmcxaddr = VMCX_EADDR;
     }
 
     if (nv->nv_vvmcx == NULL) {
-        nv->nv_vvmcx = hvm_map_guest_frame_rw(vmcbaddr >> PAGE_SHIFT);
+        nv->nv_vvmcx = hvm_map_guest_frame_rw(vmcbaddr >> PAGE_SHIFT, 1);
         if (nv->nv_vvmcx == NULL)
             return 0;
         nv->nv_vvmcxaddr = vmcbaddr;
@@ -141,6 +140,8 @@ void nsvm_vcpu_destroy(struct vcpu *v)
                            get_order_from_bytes(MSRPM_SIZE));
         svm->ns_merged_msrpm = NULL;
     }
+    hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
+    nv->nv_vvmcx = NULL;
     if (nv->nv_n2vmcx) {
         free_vmcb(nv->nv_n2vmcx);
         nv->nv_n2vmcx = NULL;
@@ -358,11 +359,11 @@ static int nsvm_vmrun_permissionmap(stru
     svm->ns_oiomap_pa = svm->ns_iomap_pa;
     svm->ns_iomap_pa = ns_vmcb->_iopm_base_pa;
 
-    ns_viomap = hvm_map_guest_frame_ro(svm->ns_iomap_pa >> PAGE_SHIFT);
+    ns_viomap = hvm_map_guest_frame_ro(svm->ns_iomap_pa >> PAGE_SHIFT, 0);
     ASSERT(ns_viomap != NULL);
     ioport_80 = test_bit(0x80, ns_viomap);
     ioport_ed = test_bit(0xed, ns_viomap);
-    hvm_unmap_guest_frame(ns_viomap);
+    hvm_unmap_guest_frame(ns_viomap, 0);
 
     svm->ns_iomap = nestedhvm_vcpu_iomap_get(ioport_80, ioport_ed);
 
@@ -888,7 +889,7 @@ nsvm_vmcb_guest_intercepts_ioio(paddr_t 
         break;
     }
 
-    io_bitmap = hvm_map_guest_frame_ro(gfn);
+    io_bitmap = hvm_map_guest_frame_ro(gfn, 0);
     if (io_bitmap == NULL) {
         gdprintk(XENLOG_ERR,
             "IOIO intercept: mapping of permission map failed\n");
@@ -896,7 +897,7 @@ nsvm_vmcb_guest_intercepts_ioio(paddr_t 
     }
 
     enabled = test_bit(port, io_bitmap);
-    hvm_unmap_guest_frame(io_bitmap);
+    hvm_unmap_guest_frame(io_bitmap, 0);
 
     if (!enabled)
         return NESTEDHVM_VMEXIT_HOST;
diff -r 29037a487b2a -r 0c845035d84c xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Wed Jan 23 14:11:39 2013 +0100
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Wed Jan 23 14:12:54 2013 +0100
@@ -569,18 +569,20 @@ void nvmx_update_exception_bitmap(struct
 static void nvmx_update_apic_access_address(struct vcpu *v)
 {
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
-    u64 apic_gpfn, apic_mfn;
     u32 ctrl;
-    void *apic_va;
 
     ctrl = __n2_secondary_exec_control(v);
     if ( ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES )
     {
+        p2m_type_t p2mt;
+        unsigned long apic_gpfn;
+        struct page_info *apic_pg;
+
+        apic_gpfn = __get_vvmcs(nvcpu->nv_vvmcx, APIC_ACCESS_ADDR) >> PAGE_SHIFT;
-        apic_va = hvm_map_guest_frame_ro(apic_gpfn);
-        apic_mfn = virt_to_mfn(apic_va);
-        __vmwrite(APIC_ACCESS_ADDR, (apic_mfn << PAGE_SHIFT));
-        hvm_unmap_guest_frame(apic_va); 
+        apic_pg = get_page_from_gfn(v->domain, apic_gpfn, &p2mt, P2M_ALLOC);
+        ASSERT(apic_pg && !p2m_is_paging(p2mt));
+        __vmwrite(APIC_ACCESS_ADDR, page_to_maddr(apic_pg));
+        put_page(apic_pg);
     }
     else
         __vmwrite(APIC_ACCESS_ADDR, 0);
@@ -589,18 +591,20 @@ static void nvmx_update_apic_access_addr
 static void nvmx_update_virtual_apic_address(struct vcpu *v)
 {
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
-    u64 vapic_gpfn, vapic_mfn;
     u32 ctrl;
-    void *vapic_va;
 
     ctrl = __n2_exec_control(v);
     if ( ctrl & CPU_BASED_TPR_SHADOW )
     {
+        p2m_type_t p2mt;
+        unsigned long vapic_gpfn;
+        struct page_info *vapic_pg;
+
+        vapic_gpfn = __get_vvmcs(nvcpu->nv_vvmcx, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT;
-        vapic_va = hvm_map_guest_frame_ro(vapic_gpfn);
-        vapic_mfn = virt_to_mfn(vapic_va);
-        __vmwrite(VIRTUAL_APIC_PAGE_ADDR, (vapic_mfn << PAGE_SHIFT));
-        hvm_unmap_guest_frame(vapic_va); 
+        vapic_pg = get_page_from_gfn(v->domain, vapic_gpfn, &p2mt, P2M_ALLOC);
+        ASSERT(vapic_pg && !p2m_is_paging(p2mt));
+        __vmwrite(VIRTUAL_APIC_PAGE_ADDR, page_to_maddr(vapic_pg));
+        put_page(vapic_pg);
     }
     else
         __vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
@@ -641,9 +645,9 @@ static void __map_msr_bitmap(struct vcpu
     unsigned long gpa;
 
     if ( nvmx->msrbitmap )
-        hvm_unmap_guest_frame (nvmx->msrbitmap); 
+        hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
-    nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT);
+    nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
 }
 
 static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
@@ -654,9 +658,9 @@ static void __map_io_bitmap(struct vcpu 
 
     index = vmcs_reg == IO_BITMAP_A ? 0 : 1;
     if (nvmx->iobitmap[index])
-        hvm_unmap_guest_frame (nvmx->iobitmap[index]); 
+        hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
-    nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT);
+    nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
 }
 
 static inline void map_io_bitmap_all(struct vcpu *v)
@@ -673,17 +677,17 @@ static void nvmx_purge_vvmcs(struct vcpu
 
     __clear_current_vvmcs(v);
     if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
-        hvm_unmap_guest_frame(nvcpu->nv_vvmcx);
+        hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1);
     nvcpu->nv_vvmcx = NULL;
     nvcpu->nv_vvmcxaddr = VMCX_EADDR;
     for (i=0; i<2; i++) {
         if ( nvmx->iobitmap[i] ) {
-            hvm_unmap_guest_frame(nvmx->iobitmap[i]); 
+            hvm_unmap_guest_frame(nvmx->iobitmap[i], 1);
             nvmx->iobitmap[i] = NULL;
         }
     }
     if ( nvmx->msrbitmap ) {
-        hvm_unmap_guest_frame(nvmx->msrbitmap);
+        hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
         nvmx->msrbitmap = NULL;
     }
 }
@@ -1289,7 +1293,7 @@ int nvmx_handle_vmptrld(struct cpu_user_
 
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
-        nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
+        nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
         nvcpu->nv_vvmcxaddr = gpa;
         map_io_bitmap_all (v);
         __map_msr_bitmap(v);
@@ -1350,10 +1354,10 @@ int nvmx_handle_vmclear(struct cpu_user_
     else 
     {
         /* Even if this VMCS isn't the current one, we must clear it. */
-        vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
+        vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
         if ( vvmcs ) 
             __set_vvmcs(vvmcs, NVMX_LAUNCH_STATE, 0);
-        hvm_unmap_guest_frame(vvmcs);
+        hvm_unmap_guest_frame(vvmcs, 0);
     }
 
     vmreturn(regs, VMSUCCEED);
diff -r 29037a487b2a -r 0c845035d84c xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Jan 23 14:11:39 2013 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Jan 23 14:12:54 2013 +0100
@@ -423,9 +423,9 @@ int hvm_virtual_to_linear_addr(
     unsigned int addr_size,
     unsigned long *linear_addr);
 
-void *hvm_map_guest_frame_rw(unsigned long gfn);
-void *hvm_map_guest_frame_ro(unsigned long gfn);
-void hvm_unmap_guest_frame(void *p);
+void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent);
+void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
+void hvm_unmap_guest_frame(void *p, bool_t permanent);
 
 static inline void hvm_set_info_guest(struct vcpu *v)
 {
