
[Xen-changelog] [xen-unstable] [IA64] domheap: Allocate privregs from domain heap for VTi domain



# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1200596743 25200
# Node ID 6f7e6608cb746188b4ed1635924186c656ffbf27
# Parent  80626da7f6e3afce1b33cbffbae37fd0bf6cf3fa
[IA64] domheap: Allocate privregs from domain heap for VTi domain

- Pin privregs down with both dtr/itr so that privregs can be allocated
  from the domain heap.
- Introduce vmx_vpd_pin()/vmx_vpd_unpin() (see the sketch after this list).
  The vpd area is pinned down while its vcpu is current, but two functions,
  update_vhpi() and alloc_vpd(), are exceptions: they must pin the area
  down explicitly before making the PAL call.
- Rework the context switch slightly so that it never uses an unpinned
  vpd area.  vmx_load_state() needs the vpd area pinned down, so call it
  after vmx_load_all_rr().
- Fix vmx_load_all_rr().
  vmx_switch_rr7() already sets psr.ic = 0, so clearing psr.ic before
  calling vmx_switch_rr7() is pointless.
- Improve vmx_switch_rr7().
  It used to set psr.ic = 0 only after switching to physical mode, but
  this can be done as part of the mode switch itself.
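
For reference, the intended calling pattern for the new helpers is sketched
below.  This is illustrative only and not part of the patch; it is modelled
on the update_vhpi() hunk further down, and example_set_pending() is a
hypothetical name.  vmx_vpd_pin()/vmx_vpd_unpin() are no-ops when the vcpu
is current, because privregs is already pinned in that case.

    #include <asm/vmx_vpd.h>       /* vmx_vpd_pin()/vmx_vpd_unpin()        */
    #include <asm/vmx_pal_vsa.h>   /* ia64_call_vsa(), PAL_VPS_* services  */

    /* Hypothetical caller, for illustration only. */
    static void example_set_pending(VCPU *vcpu)
    {
        /* Make sure privregs is covered by a TR before the firmware call. */
        vmx_vpd_pin(vcpu);
        ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
                      (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
        /* Drop the temporary mapping and restore the vcpu's rr7 state. */
        vmx_vpd_unpin(vcpu);
    }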

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/vmx/vlsapic.c      |    6 ++
 xen/arch/ia64/vmx/vmx_entry.S    |   57 ++++++++++++++++++++++---
 xen/arch/ia64/vmx/vmx_init.c     |   88 ++++++++++++++++++++++++++++++++++++---
 xen/arch/ia64/vmx/vmx_phy_mode.c |    8 ---
 xen/arch/ia64/vmx/vmx_vcpu.c     |    4 -
 xen/arch/ia64/xen/domain.c       |    3 -
 xen/include/asm-ia64/vmx_vcpu.h  |    2 
 xen/include/asm-ia64/vmx_vpd.h   |   18 +++++++
 xen/include/asm-ia64/xenkregs.h  |    2 
 9 files changed, 163 insertions(+), 25 deletions(-)

diff -r 80626da7f6e3 -r 6f7e6608cb74 xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Thu Jan 17 12:05:43 2008 -0700
+++ b/xen/arch/ia64/vmx/vlsapic.c       Thu Jan 17 12:05:43 2008 -0700
@@ -36,6 +36,7 @@
 #include <asm/gcc_intrin.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/vmx.h>
+#include <asm/vmx_vpd.h>
 #include <asm/hw_irq.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
@@ -91,9 +92,12 @@ static void update_vhpi(VCPU *vcpu, int 
 
     VCPU(vcpu,vhpi) = vhpi;
     // TODO: Add support for XENO
-    if (VCPU(vcpu,vac).a_int)
+    if (VCPU(vcpu,vac).a_int) {
+        vmx_vpd_pin(vcpu);
         ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT, 
                       (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
+        vmx_vpd_unpin(vcpu);
+    }
 }
 
 
diff -r 80626da7f6e3 -r 6f7e6608cb74 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S     Thu Jan 17 12:05:43 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_entry.S     Thu Jan 17 12:05:43 2008 -0700
@@ -623,14 +623,14 @@ END(ia64_leave_hypercall)
 #define PSR_BITS_TO_CLEAR                                           \
        (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB |     \
         IA64_PSR_RT | IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI |    \
-        IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH)
+        IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_IC)
 #define PSR_BITS_TO_SET    IA64_PSR_BN
 
-//extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void * pal_vaddr );
+//extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void * pal_vaddr, void * shared_arch_info );
 GLOBAL_ENTRY(vmx_switch_rr7)
        // not sure this unwind statement is correct...
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
-       alloc loc1 = ar.pfs, 3, 7, 0, 0
+       alloc loc1 = ar.pfs, 4, 8, 0, 0
 1:{
        mov r28  = in0                  // copy procedure index
        mov r8   = ip                   // save ip to compute branch
@@ -643,7 +643,12 @@ 1:{
        tpa r3 = r8                     // get physical address of ip
        dep loc5 = 0,in1,60,4           // get physical address of guest_vhpt
        dep loc6 = 0,in2,60,4           // get physical address of pal code
-       ;;
+       dep loc7 = 0,in3,60,4           // get physical address of privregs
+       ;;
+       dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
+                                        // mask granule shift
+       dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
+                                        // mask granule shift
        mov loc4 = psr                  // save psr
        ;;
        mov loc3 = ar.rsc               // save RSE configuration
@@ -661,11 +666,9 @@ 1:
        dep r16=-1,r0,61,3
        ;;
        mov rr[r16]=in0
+       ;;
        srlz.d
        ;;
-       rsm 0x6000
-       ;;
-       srlz.d
 
        // re-pin mappings for kernel text and data
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
@@ -679,6 +682,7 @@ 1:
        mov r16=IA64_TR_KERNEL
        movl r25 = PAGE_KERNEL
        // r2=KERNEL_TR_PAGE_SHIFT truncated physicall address of ip
+       //   = ia64_tpa(ip) & (KERNEL_TR_PAGE_SIZE - 1)
        dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
        ;;
        or r24=r2,r25
@@ -737,7 +741,9 @@ 1:
        // re-pin mappings for guest_vhpt
        // unless overlaps with IA64_TR_XEN_HEAP_REGS or IA64_TR_CURRENT_STACK
        dep r18=0,loc5,0,KERNEL_TR_PAGE_SHIFT
+       // r21 = (current physical addr) & (IA64_GRANULE_SIZE - 1)
        dep r21=0,r21,0,IA64_GRANULE_SHIFT 
+       // r17 = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
        dep r17=0,loc5,0,IA64_GRANULE_SHIFT 
        ;;
        cmp.eq p6,p0=r18,r2             // check overlap with xen heap
@@ -771,6 +777,43 @@ 1:
        itr.i itr[r24]=loc6             // wire in new mapping...
        ;;
 
+       // r16, r19, r20 are used by
+       //  ia64_switch_mode_phys()/ia64_switch_mode_virt()
+       // re-pin mappings for privregs
+       // r2   = ia64_tpa(ip) & (KERNEL_TR_PAGE_SIZE - 1)
+       // r21  = (current physical addr) & (IA64_GRANULE_SIZE - 1)
+       // r17  = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
+
+       // r24  = (privregs physical addr) & (KERNEL_TR_PAGE_SIZE - 1)
+       // loc6 = (((pal phys addr) & (IA64_GRANULE_SIZE - 1) << 2)) | PAGE_KERNEL
+       // loc7 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
+       dep r24 = 0,loc7,0,KERNEL_TR_PAGE_SHIFT
+       ;;
+       cmp.ne p6,p0=r24,r2             // check overlap with xen heap
+       ;; 
+(p6)   cmp.ne.unc p7,p0=r21,loc7       // check overlap with current stack
+       ;;
+(p7)   cmp.ne.unc p8,p0=r17,loc7       // check overlap with guest_vhpt
+       ;;
+       // loc7 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
+       or loc7 = r25,loc7          // construct PA | page properties
+       ;;
+       cmp.ne p9,p0=loc6,loc7
+       mov r22=IA64_TR_VPD
+       mov r24=IA64_TR_MAPPED_REGS
+       mov r23=IA64_GRANULE_SHIFT<<2
+       ;;
+(p9)   ptr.i   in3,r23 
+(p8)   ptr.d   in3,r23
+       mov cr.itir=r23
+       mov cr.ifa=in3
+       ;;
+(p9)   itr.i itr[r22]=loc7         // wire in new mapping...
+       ;;
+(p8)   itr.d dtr[r24]=loc7         // wire in new mapping...
+       ;;
+.privregs_overlaps:
+
        // done, switch back to virtual and return
        mov r16=loc4                    // r16= original psr
        br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
diff -r 80626da7f6e3 -r 6f7e6608cb74 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Thu Jan 17 12:05:43 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_init.c      Thu Jan 17 12:05:43 2008 -0700
@@ -51,6 +51,7 @@
 #include <asm/viosapic.h>
 #include <xen/event.h>
 #include <asm/vlsapic.h>
+#include <asm/vhpt.h>
 #include "entry.h"
 
 /* Global flag to identify whether Intel vmx feature is on */
@@ -150,20 +151,21 @@ typedef union {
        };
 } cpuid3_t;
 
-/* Allocate vpd from xenheap */
+/* Allocate vpd from domheap */
 static vpd_t *alloc_vpd(void)
 {
        int i;
        cpuid3_t cpuid3;
+       struct page_info *page;
        vpd_t *vpd;
        mapped_regs_t *mregs;
 
-       vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
-       if (!vpd) {
+       page = alloc_domheap_pages(NULL, get_order(VPD_SIZE), 0);
+       if (page == NULL) {
                printk("VPD allocation failed.\n");
                return NULL;
        }
-       vpd = (vpd_t *)virt_to_xenva(vpd);
+       vpd = page_to_virt(page);
 
        printk(XENLOG_DEBUG "vpd base: 0x%p, vpd size:%ld\n",
               vpd, sizeof(vpd_t));
@@ -191,12 +193,79 @@ static vpd_t *alloc_vpd(void)
        return vpd;
 }
 
-/* Free vpd to xenheap */
+/* Free vpd to domheap */
 static void
 free_vpd(struct vcpu *v)
 {
        if ( v->arch.privregs )
-               free_xenheap_pages(v->arch.privregs, get_order(VPD_SIZE));
+               free_domheap_pages(virt_to_page(v->arch.privregs),
+                                  get_order(VPD_SIZE));
+}
+
+// This is used for PAL_VP_CREATE and PAL_VPS_SET_PENDING_INTERRUPT
+// so that we don't have to pin the vpd down with itr[].
+void
+__vmx_vpd_pin(struct vcpu* v)
+{
+       unsigned long privregs = (unsigned long)v->arch.privregs;
+       u64 psr;
+       
+       // check overlapping with xenheap
+       if ((privregs &
+            ~(KERNEL_TR_PAGE_SIZE - 1)) ==
+           ((unsigned long)__va(ia64_tpa(current_text_addr())) &
+            ~(KERNEL_TR_PAGE_SIZE - 1)))
+               return;
+               
+       privregs &= ~(IA64_GRANULE_SIZE - 1);
+
+       // check overlapping with current stack
+       if (privregs ==
+           ((unsigned long)current & ~(IA64_GRANULE_SIZE - 1)))
+               return;
+
+       if (!VMX_DOMAIN(current)) {
+               // check overlapping with vhpt
+               if (privregs ==
+                   (vcpu_vhpt_maddr(current) & ~(IA64_GRANULE_SHIFT - 1)))
+                       return;
+       } else {
+               // check overlapping with vhpt
+               if (privregs ==
+                   ((unsigned long)current->arch.vhpt.hash &
+                    ~(IA64_GRANULE_SHIFT - 1)))
+                       return;
+
+               // check overlapping with privregs
+               if (privregs ==
+                   ((unsigned long)current->arch.privregs &
+                    ~(IA64_GRANULE_SHIFT - 1)))
+                       return;
+       }
+
+       psr = ia64_clear_ic();
+       ia64_ptr(0x2 /*D*/, privregs, IA64_GRANULE_SIZE);
+       ia64_srlz_d();
+       ia64_itr(0x2 /*D*/, IA64_TR_MAPPED_REGS, privregs,
+                pte_val(pfn_pte(__pa(privregs) >> PAGE_SHIFT, PAGE_KERNEL)),
+                IA64_GRANULE_SHIFT);
+       ia64_set_psr(psr);
+       ia64_srlz_d();
+}
+
+void
+__vmx_vpd_unpin(struct vcpu* v)
+{
+       if (!VMX_DOMAIN(current)) {
+               int rc;
+               rc = !set_one_rr(VRN7 << VRN_SHIFT, VCPU(current, rrs[VRN7]));
+               BUG_ON(rc);
+       } else {
+               IA64FAULT fault;
+               fault = vmx_vcpu_set_rr(current, VRN7 << VRN_SHIFT,
+                                       VMX(current, vrr[VRN7]));
+               BUG_ON(fault != IA64_NO_FAULT);
+       }
 }
 
 /*
@@ -212,7 +281,11 @@ vmx_create_vp(struct vcpu *v)
        /* ia64_ivt is function pointer, so need this tranlation */
        ivt_base = (u64) &vmx_ia64_ivt;
        printk(XENLOG_DEBUG "ivt_base: 0x%lx\n", ivt_base);
+
+       vmx_vpd_pin(v);
        ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
+       vmx_vpd_unpin(v);
+       
        if (ret != PAL_STATUS_SUCCESS){
                panic_domain(vcpu_regs(v),"ia64_pal_vp_create failed. \n");
        }
@@ -224,6 +297,7 @@ vmx_save_state(struct vcpu *v)
 {
        u64 status;
 
+       BUG_ON(v != current);
        /* FIXME: about setting of pal_proc_vector... time consuming */
        status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS){
@@ -250,6 +324,7 @@ vmx_load_state(struct vcpu *v)
 {
        u64 status;
 
+       BUG_ON(v != current);
        status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS){
                panic_domain(vcpu_regs(v),"Restore vp status failed\n");
@@ -518,6 +593,7 @@ void vmx_do_resume(struct vcpu *v)
        ioreq_t *p;
 
        vmx_load_all_rr(v);
+       vmx_load_state(v);
        migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
 
        /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
diff -r 80626da7f6e3 -r 6f7e6608cb74 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Jan 17 12:05:43 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Jan 17 12:05:43 2008 -0700
@@ -138,7 +138,6 @@ void
 void
 vmx_load_all_rr(VCPU *vcpu)
 {
-       unsigned long psr;
        unsigned long rr0, rr4;
 
        switch (vcpu->arch.arch_vmx.mmu_mode) {
@@ -158,8 +157,6 @@ vmx_load_all_rr(VCPU *vcpu)
                panic_domain(NULL, "bad mmu mode value");
        }
 
-       psr = ia64_clear_ic();
-
        ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
        ia64_dv_serialize_data();
        ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
@@ -175,13 +172,12 @@ vmx_load_all_rr(VCPU *vcpu)
        ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
        ia64_dv_serialize_data();
        vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
-                      (void *)vcpu->arch.vhpt.hash, pal_vaddr);
+                      (void *)vcpu->arch.vhpt.hash,
+                      pal_vaddr, vcpu->arch.privregs);
        ia64_set_pta(VMX(vcpu, mpta));
        vmx_ia64_set_dcr(vcpu);
 
        ia64_srlz_d();
-       ia64_set_psr(psr);
-       ia64_srlz_i();
 }
 
 void
diff -r 80626da7f6e3 -r 6f7e6608cb74 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Thu Jan 17 12:05:43 2008 -0700
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Thu Jan 17 12:05:43 2008 -0700
@@ -181,8 +181,8 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
     switch((u64)(reg>>VRN_SHIFT)) {
     case VRN7:
         if (likely(vcpu == current))
-            vmx_switch_rr7(vrrtomrr(vcpu,val),
-                           (void *)vcpu->arch.vhpt.hash, pal_vaddr );
+            vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
+                           pal_vaddr, vcpu->arch.privregs);
        break;
     case VRN4:
         rrval = vrrtomrr(vcpu,val);
diff -r 80626da7f6e3 -r 6f7e6608cb74 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Thu Jan 17 12:05:43 2008 -0700
+++ b/xen/arch/ia64/xen/domain.c        Thu Jan 17 12:05:43 2008 -0700
@@ -241,8 +241,6 @@ void context_switch(struct vcpu *prev, s
             ia64_setreg(_IA64_REG_CR_DCR, dcr);
         }
     }
-    if (VMX_DOMAIN(next))
-        vmx_load_state(next);
 
     ia64_disable_vhpt_walker();
     lazy_fp_switch(prev, current);
@@ -261,6 +259,7 @@ void context_switch(struct vcpu *prev, s
 
     if (VMX_DOMAIN(current)) {
         vmx_load_all_rr(current);
+        vmx_load_state(current);
         migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
                       current->processor);
     } else {
diff -r 80626da7f6e3 -r 6f7e6608cb74 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Thu Jan 17 12:05:43 2008 -0700
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Thu Jan 17 12:05:43 2008 -0700
@@ -114,7 +114,7 @@ extern void memwrite_v(VCPU * vcpu, thas
                        size_t s);
 extern void memwrite_p(VCPU * vcpu, u64 * src, u64 * dest, size_t s);
 extern void vcpu_load_kernel_regs(VCPU * vcpu);
-extern void vmx_switch_rr7(unsigned long, void *, void *);
+extern void vmx_switch_rr7(unsigned long, void *, void *, void *);
 
 extern void dtlb_fault(VCPU * vcpu, u64 vadr);
 extern void nested_dtlb(VCPU * vcpu);
diff -r 80626da7f6e3 -r 6f7e6608cb74 xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h    Thu Jan 17 12:05:43 2008 -0700
+++ b/xen/include/asm-ia64/vmx_vpd.h    Thu Jan 17 12:05:43 2008 -0700
@@ -80,6 +80,24 @@ struct arch_vmx_struct {
 
 #define ARCH_VMX_DOMAIN         0       /* Need it to indicate VTi domain */
 
+/* pin/unpin vpd area for PAL call with DTR[] */
+void __vmx_vpd_pin(struct vcpu* v);
+void __vmx_vpd_unpin(struct vcpu* v); 
+
+static inline void vmx_vpd_pin(struct vcpu* v)
+{
+    if (likely(v == current))
+        return;
+    __vmx_vpd_pin(v);
+}
+
+static inline void vmx_vpd_unpin(struct vcpu* v)
+{
+    if (likely(v == current))
+        return;
+    __vmx_vpd_unpin(v);
+}
+
 #endif //__ASSEMBLY__
 
 // VPD field offset
diff -r 80626da7f6e3 -r 6f7e6608cb74 xen/include/asm-ia64/xenkregs.h
--- a/xen/include/asm-ia64/xenkregs.h   Thu Jan 17 12:05:43 2008 -0700
+++ b/xen/include/asm-ia64/xenkregs.h   Thu Jan 17 12:05:43 2008 -0700
@@ -8,6 +8,8 @@
 #define IA64_TR_SHARED_INFO    4       /* dtr4: page shared with domain */
 #define IA64_TR_MAPPED_REGS    5       /* dtr5: vcpu mapped regs */
 #define        IA64_TR_VHPT            6       /* dtr6: vhpt */
+
+#define IA64_TR_VPD            2       /* itr2: vpd */
 
 #define IA64_DTR_GUEST_KERNEL   7
 #define IA64_ITR_GUEST_KERNEL   2

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

