
[Xen-changelog] First fast hyperprivop support (hyper_rfi)



ChangeSet 1.1509.1.5, 2005/05/26 09:43:04-06:00, djm@xxxxxxxxxxxxxxx

        First fast hyperprivop support (hyper_rfi)
        
        Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxx>



 arch/ia64/Makefile           |    2 
 arch/ia64/asm-offsets.c      |    7 ++
 arch/ia64/hyperprivop.S      |  103 +++++++++++++++++++++++++++++++++++++++++++
 arch/ia64/ivt.S              |   13 +++++
 arch/ia64/privop.c           |    2 
 include/asm-ia64/xensystem.h |    2 
 include/public/arch-ia64.h   |    7 ++
 7 files changed, 132 insertions(+), 4 deletions(-)


diff -Nru a/xen/arch/ia64/Makefile b/xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile    2005-05-26 14:01:42 -04:00
+++ b/xen/arch/ia64/Makefile    2005-05-26 14:01:42 -04:00
@@ -9,7 +9,7 @@
        xenmem.o sal.o cmdline.o mm_init.o tlb.o smpboot.o \
        extable.o linuxextable.o xenirq.o xentime.o \
        regionreg.o entry.o unaligned.o privop.o vcpu.o \
-       irq_ia64.o irq_lsapic.o vhpt.o xenasm.o dom_fw.o
+       irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o
 
 ifeq ($(CONFIG_VTI),y)
 OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
diff -Nru a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       2005-05-26 14:01:42 -04:00
+++ b/xen/arch/ia64/asm-offsets.c       2005-05-26 14:01:42 -04:00
@@ -44,6 +44,13 @@
        DEFINE(XSI_PSR_IC_OFS, offsetof(vcpu_info_t, arch.interrupt_collection_enabled));
        DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
        DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
+       DEFINE(XSI_IIP_OFS, offsetof(vcpu_info_t, arch.iip));
+       DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
+       DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
+       DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
+       DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
+       DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
+       DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
        //DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
        //DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
        //DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
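
For context: asm-offsets.c is the usual kernel mechanism for exporting C
structure offsets to hand-written assembly.  A minimal sketch of the pattern,
assuming the standard Linux DEFINE idiom (reproduced here for illustration
only, not part of this patch):

    /* The compiler folds offsetof() to a constant and emits it as an
     * immediate operand in the generated assembly; a build step then
     * scrapes these "->SYM value" markers into the generated offsets
     * header (included as <asm/offsets.h> by hyperprivop.S below), so
     * the assembly can use XSI_IIP_OFS etc. as assemble-time constants. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

hyperprivop.S then addresses each field relative to r18, which holds the
address of the virtual psr.ic field, hence all of the
XSI_*_OFS-XSI_PSR_IC_OFS offset arithmetic in the new assembly.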
diff -Nru a/xen/arch/ia64/hyperprivop.S b/xen/arch/ia64/hyperprivop.S
--- /dev/null   Wed Dec 31 16:00:00 1969
+++ b/xen/arch/ia64/hyperprivop.S       2005-05-26 14:01:42 -04:00
@@ -0,0 +1,103 @@
+/*
+ * arch/ia64/kernel/hyperprivop.S
+ *
+ * Copyright (C) 2005 Hewlett-Packard Co
+ *     Dan Magenheimer <dan.magenheimer@xxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <asm/asmmacro.h>
+#include <asm/kregs.h>
+#include <asm/offsets.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <public/arch-ia64.h>
+
+// Note: not hand-scheduled for now
+//  Registers at entry
+//     r16 == cr.isr
+//     r17 == cr.iim
+//     r18 == XSI_PSR_IC (the address of the virtual psr.ic, not the offset)
+//     r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+//     r31 == pr
+GLOBAL_ENTRY(fast_hyperprivop)
+       //cover;;
+       // if domain interrupts pending, give up for now and do it the slow way
+       adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r20=[r20] ;;
+       cmp.ne p7,p0=r0,r20
+(p7)   br.sptk.many dispatch_break_fault ;;
+
+       // HYPERPRIVOP_RFI?
+       cmp.eq p7,p6=XEN_HYPER_RFI,r17
+(p7)   br.sptk.many hyper_rfi;;
+       // if not rfi, give up for now and do it the slow way
+       br.sptk.many dispatch_break_fault ;;
+
+// ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
+ENTRY(hyper_rfi)
+       adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r21=[r20];;         // r21 = vcr.ipsr
+       extr.u r22=r21,IA64_PSR_BE_BIT,1 ;;
+       // if turning on psr.be, give up for now and do it the slow way
+       cmp.ne p7,p0=r22,r0
+(p7)   br.sptk.many dispatch_break_fault ;;
+       // if (!(vpsr.dt && vpsr.rt && vpsr.it)), do it the slow way
+       movl r20=(IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT);;
+       and r22=r20,r21
+       ;;
+       cmp.ne p7,p0=r22,r20
+(p7)   br.sptk.many dispatch_break_fault ;;
+       // if was in metaphys mode, do it the slow way (FIXME later?)
+       adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld4 r20=[r20];;
+       cmp.ne p7,p0=r20,r0
+(p7)   br.sptk.many dispatch_break_fault ;;
+       // if domain hasn't already done virtual bank switch
+       //  do it the slow way (FIXME later?)
+       adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld4 r20=[r20];;
+       cmp.eq p7,p0=r20,r0
+(p7)   br.sptk.many dispatch_break_fault ;;
+       // validate vcr.iip, if in Xen range, do it the slow way
+       adds r20=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r22=[r20];;
+       movl r23=XEN_VIRT_SPACE_LOW
+       movl r24=XEN_VIRT_SPACE_HIGH ;;
+       cmp.ltu p0,p7=r22,r23 ;;        // if !(iip<low) &&
+(p7)   cmp.geu p0,p7=r22,r24 ;;        //    !(iip>=high)
+(p7)   br.sptk.many dispatch_break_fault ;;
+
+       // OK now, let's do an rfi.
+       // r18=&vpsr.i|vpsr.ic, r21==vpsr, r20==&vcr.iip, r22=vcr.iip
+       mov cr.iip=r22;;
+       adds r20=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
+       st4 [r20]=r0 ;;
+       adds r20=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r20=[r20];;
+       dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
+       mov cr.ifs=r20 ;;
+// TODO: increment a counter so we can count how many rfi's go the fast way
+//    but where?  counter must be pinned
+       // ipsr.cpl = (vcr.ipsr.cpl == 0) ? 2 : 3;
+       dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
+       // vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
+       mov r19=r0 ;;
+       extr.u r22=r21,IA64_PSR_I_BIT,1 ;;
+       cmp.ne p7,p6=r22,r0 ;;
+(p7)   dep r19=-1,r19,32,1
+       extr.u r22=r21,IA64_PSR_IC_BIT,1 ;;
+       cmp.ne p7,p6=r22,r0 ;;
+(p7)   dep r19=-1,r19,0,1 ;;
+       st8 [r18]=r19 ;;
+       // force on psr.ic, i, dt, rt, it, bn
+       movl r20=(IA64_PSR_I|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT|IA64_PSR_BN)
+       ;;
+       or r21=r21,r20
+       ;;
+       mov cr.ipsr=r21
+       mov pr=r31,-1
+       ;;
+       rfi
+       ;;
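
In C terms, the guard conditions in fast_hyperprivop/hyper_rfi above amount
to roughly the following.  This is a sketch with an invented helper name;
vcpu_info_t and the IA64_PSR_* masks are existing Xen/Linux definitions, and
the real logic is only the assembly in this patch:

    /* Sketch: the checks that send hyper_rfi back to the slow path
     * (dispatch_break_fault).  Returns nonzero iff the fast rfi is safe. */
    static int hyper_rfi_fast_path_ok(vcpu_info_t *vi)  /* hypothetical */
    {
            unsigned long ipsr = vi->arch.ipsr;          /* vcr.ipsr */
            unsigned long iip  = vi->arch.iip;           /* vcr.iip  */
            unsigned long xlat = IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT;

            if (vi->arch.pending_interruption) return 0; /* irq pending       */
            if (ipsr & IA64_PSR_BE)            return 0; /* would set psr.be  */
            if ((ipsr & xlat) != xlat)         return 0; /* not fully virtual */
            if (vi->arch.metaphysical_mode)    return 0; /* FIXME later?      */
            if (vi->arch.banknum == 0)         return 0; /* no bank switch    */
            if (iip >= XEN_VIRT_SPACE_LOW &&
                iip <  XEN_VIRT_SPACE_HIGH)    return 0; /* iip inside Xen    */
            return 1;
    }

When every guard passes, the assembly loads vcr.iip into cr.iip, sanitizes
vcr.ifs into cr.ifs, forces cpl out of ring 0 (to 2 or 3) and
psr.{i,ic,dt,rt,it,bn} on in cr.ipsr, mirrors vcr.ipsr's i/ic bits into the
shared vpsr word, and issues a real rfi, all without building a pt_regs
frame.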
diff -Nru a/xen/arch/ia64/ivt.S b/xen/arch/ia64/ivt.S
--- a/xen/arch/ia64/ivt.S       2005-05-26 14:01:42 -04:00
+++ b/xen/arch/ia64/ivt.S       2005-05-26 14:01:42 -04:00
@@ -792,6 +792,7 @@
        // Later, they will be fast hand-coded assembly with psr.ic off
        // which means no calls, no use of r1-r15 and no memory accesses
        // except to pinned addresses!
+#define FAST_HYPERPRIVOPS
 #ifdef FAST_HYPERPRIVOPS
        br.sptk.many fast_hyperprivop
 #else
@@ -917,9 +918,10 @@
        // fault ever gets "unreserved", simply move the following code to a more
        // suitable spot...
 
-ENTRY(dispatch_break_fault)
+GLOBAL_ENTRY(dispatch_break_fault)
        SAVE_MIN_WITH_COVER
        ;;
+dispatch_break_fault_post_save:
        alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
        mov out0=cr.ifa
        adds out1=16,sp
@@ -1796,6 +1798,15 @@
        mov rp=r14
        br.sptk.many ia64_prepare_handle_reflection
 END(dispatch_reflection)
+
+#define SAVE_MIN_COVER_DONE    DO_SAVE_MIN(,mov r30=cr.ifs,)
+
+// same as dispatch_break_fault except cover has already been done
+GLOBAL_ENTRY(dispatch_slow_hyperprivop)
+       SAVE_MIN_COVER_DONE
+       ;;
+       br.sptk.many dispatch_break_fault_post_save
+END(dispatch_slow_hyperprivop)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
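
The net control flow at the break-fault vector, as rough C-like pseudocode
(the function names match the assembly labels, everything else is
illustrative).  Note that the #define FAST_HYPERPRIVOPS added just above the
#ifdef enables the fast path unconditionally:

    void break_fault_hyperprivop(void)  /* hypothetical wrapper name */
    {
    #ifdef FAST_HYPERPRIVOPS
            fast_hyperprivop();     /* rfi's straight back to the guest on
                                       success; branches to the slow path
                                       itself whenever it must give up    */
    #else
            dispatch_break_fault(); /* old behavior: always emulate in C  */
    #endif
    }

dispatch_slow_hyperprivop enters the same slow path but uses
SAVE_MIN_COVER_DONE instead of SAVE_MIN_WITH_COVER, i.e. it skips the cover
instruction, presumably for fast handlers that have already covered the
register frame before giving up.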
diff -Nru a/xen/arch/ia64/privop.c b/xen/arch/ia64/privop.c
--- a/xen/arch/ia64/privop.c    2005-05-26 14:01:42 -04:00
+++ b/xen/arch/ia64/privop.c    2005-05-26 14:01:42 -04:00
@@ -981,7 +981,7 @@
 {
        int i;
        char *s = buf;
-       s += sprintf(s,"Hyperprivops:\n");
+       s += sprintf(s,"Slow hyperprivops:\n");
        for (i = 1; i <= HYPERPRIVOP_MAX; i++)
                if (hyperpriv_cnt[i])
                        s += sprintf(s,"%10d %s\n",
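
The relabeling to "Slow hyperprivops:" reflects that these counters now only
see hyperprivops that fall back to the C emulator; per the TODO in
hyperprivop.S, the fast path cannot bump a counter yet because the counter
would have to live at a pinned address.  The slow-path bookkeeping this dump
reads is presumably of the following form (a sketch, not verbatim privop.c;
hyperpriv_cnt[] and HYPERPRIVOP_MAX are existing privop.c definitions):

    /* Sketch of where the dumped counts come from: bump the per-op
     * counter when the slow C emulator handles hyperprivop iim. */
    if (iim >= 1 && iim <= HYPERPRIVOP_MAX)
            hyperpriv_cnt[iim]++;       /* fast-path rfi's never get here */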
diff -Nru a/xen/include/asm-ia64/xensystem.h b/xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h  2005-05-26 14:01:42 -04:00
+++ b/xen/include/asm-ia64/xensystem.h  2005-05-26 14:01:42 -04:00
@@ -14,10 +14,10 @@
 #include <linux/kernel.h>
 
 /* Define HV space hierarchy */
-#ifdef CONFIG_VTI
 #define XEN_VIRT_SPACE_LOW      0xe800000000000000
 #define XEN_VIRT_SPACE_HIGH     0xf800000000000000     
 /* This is address to mapping rr7 switch stub, in region 5 */
+#ifdef CONFIG_VTI
 #define XEN_RR7_SWITCH_STUB     0xb700000000000000
 #endif // CONFIG_VTI
 
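XEN_VIRT_SPACE_LOW/HIGH move out of the CONFIG_VTI guard because the new
hyper_rfi fast path uses them to range-check vcr.iip, and hyperprivop.S is
built for non-VTI configurations as well.
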
diff -Nru a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    2005-05-26 14:01:42 -04:00
+++ b/xen/include/public/arch-ia64.h    2005-05-26 14:01:42 -04:00
@@ -81,4 +81,11 @@
 
 #endif /* !__ASSEMBLY__ */
 
+#define        XEN_HYPER_RFI                   1
+#define        XEN_HYPER_RSM_PSR_DT            2
+#define        XEN_HYPER_SSM_PSR_DT            3
+#define        XEN_HYPER_COVER                 4
+#define        XEN_HYPER_ITC_D                 5
+#define        XEN_HYPER_ITC_I                 6
+
 #endif /* __HYPERVISOR_IF_IA64_H__ */
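
These constants are the break immediates a paravirtualized guest issues in
place of the corresponding privileged instructions; Xen's break-fault handler
reads the immediate back out of cr.iim (r17 at entry to fast_hyperprivop
above).  A guest-side sketch in GCC inline assembly, illustrative only (the
real guest-side changes live in the paravirtualized Linux tree, not in this
patch):

    #include <public/arch-ia64.h>       /* XEN_HYPER_RFI et al. */

    /* Rather than executing a privileged rfi, which would trap into the
     * generic privop emulator, the guest breaks with the hyperprivop
     * number as the immediate operand.  Hypothetical helper name. */
    static inline void xen_rfi(void)
    {
            asm volatile ("break %0" : : "i" (XEN_HYPER_RFI) : "memory");
    }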

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

