[Xen-changelog] Add counters for hyperprivops and reflections

ChangeSet 1.1564.1.6, 2005/06/01 14:11:09-06:00, djm@xxxxxxxxxxxxxxx

	Add counters for hyperprivops and reflections
	Preliminary work for fast break reflection
	Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxx>

 arch/ia64/asm-offsets.c   |    7 +++
 arch/ia64/dom_fw.c        |    4 +-
 arch/ia64/domain.c        |    5 +-
 arch/ia64/hyperprivop.S   |   88 ++++++++++++++++++++++++++++++++++++++++++++--
 arch/ia64/ivt.S           |   26 ++++++++-----
 arch/ia64/privop.c        |    6 ++-
 arch/ia64/process.c       |   41 ++++++++++++++++++++-
 include/asm-ia64/domain.h |    2 -
 8 files changed, 159 insertions(+), 20 deletions(-)
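The counters named in the title follow the pattern privop.c already uses: one fixed array of per-op counters for each path (slow emulation through C versus the hand-coded fast path), bumped on every hit and pretty-printed on demand. A standalone sketch of that pattern follows; the array and function names mirror the patch, but the value of HYPERPRIVOP_MAX, the op names, and the sample data are invented for illustration (the real definitions live in Xen's ia64 sources):

#include <stdio.h>

#define HYPERPRIVOP_MAX 3	/* invented for this sketch; Xen defines the real value */

/* one counter per hyperprivop number; slot 0 unused, as in privop.c */
static unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX + 1];
static unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX + 1];

static const char *hyperpriv_str[HYPERPRIVOP_MAX + 1] =
	{ "", "rfi", "rsm.dt", "ssm.dt" };	/* sample names only */

/* Same shape as dump_hyperprivop_counts() in the privop.c hunk below:
 * total a counter family, then list only the nonzero entries. */
static int dump_counts(char *buf, const char *tag, unsigned long *cnt)
{
	char *s = buf;
	unsigned long total = 0;
	int i;

	for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += cnt[i];
	s += sprintf(s, "%s hyperprivops (total %lu):\n", tag, total);
	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
		if (cnt[i])
			s += sprintf(s, "%10lu %s\n", cnt[i], hyperpriv_str[i]);
	return s - buf;
}

int main(void)
{
	char buf[512];
	int n;

	slow_hyperpriv_cnt[1] = 42;	/* sample data: 42 rfi's emulated in C */
	fast_hyperpriv_cnt[1] = 1000;	/* and 1000 that took the fast path */
	n = dump_counts(buf, "Slow", slow_hyperpriv_cnt);
	n += dump_counts(buf + n, "Fast", fast_hyperpriv_cnt);	/* buf+n chaining */
	fputs(buf, stdout);
	return 0;
}

The buf+n chaining in main() is the same composition scheme the patch uses when it wires dump_reflect_counts() into dump_privop_counts() further down.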
diff -Nru a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c	2005-06-10 14:03:35 -04:00
+++ b/xen/arch/ia64/asm-offsets.c	2005-06-10 14:03:35 -04:00
@@ -45,10 +45,15 @@
 	DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
 	DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
 	DEFINE(XSI_IIP_OFS, offsetof(vcpu_info_t, arch.iip));
+	DEFINE(XSI_IPSR, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.ipsr)));
 	DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
 	DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
+	DEFINE(XSI_IIM_OFS, offsetof(vcpu_info_t, arch.iim));
 	DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
+	DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
+	DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
 	DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
+	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
 	DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
 	DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
 	DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
@@ -66,6 +71,8 @@
 	DEFINE(IA64_VCPU_META_RR0_OFFSET, offsetof (struct exec_domain, arch.metaphysical_rr0));
 	DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct exec_domain, arch.metaphysical_saved_rr0));
+	DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct exec_domain, arch.breakimm));
+	DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct exec_domain, arch.iva));
 
 	BLANK();

diff -Nru a/xen/arch/ia64/dom_fw.c b/xen/arch/ia64/dom_fw.c
--- a/xen/arch/ia64/dom_fw.c	2005-06-10 14:03:35 -04:00
+++ b/xen/arch/ia64/dom_fw.c	2005-06-10 14:03:35 -04:00
@@ -50,7 +50,7 @@
 	if (d == dom0) paddr += dom0_start;
 	imva = domain_mpa_to_imva(d,paddr);
-	build_hypercall_bundle(imva,d->breakimm,hypercall,1);
+	build_hypercall_bundle(imva,d->arch.breakimm,hypercall,1);
 }
 
@@ -61,7 +61,7 @@
 	if (d == dom0) paddr += dom0_start;
 	imva = domain_mpa_to_imva(d,paddr);
-	build_hypercall_bundle(imva,d->breakimm,hypercall,ret);
+	build_hypercall_bundle(imva,d->arch.breakimm,hypercall,ret);
 }

diff -Nru a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c	2005-06-10 14:03:35 -04:00
+++ b/xen/arch/ia64/domain.c	2005-06-10 14:03:35 -04:00
@@ -210,7 +210,7 @@
 	 */
 	d->xen_vastart = 0xf000000000000000;
 	d->xen_vaend = 0xf300000000000000;
-	d->breakimm = 0x1000;
+	d->arch.breakimm = 0x1000;
 
 	// stay on kernel stack because may get interrupts!
 	// ia64_ret_from_clone (which b0 gets in new_thread) switches
@@ -256,7 +256,8 @@
 	d->xen_vastart = 0xf000000000000000;
 	d->xen_vaend = 0xf300000000000000;
 	d->shared_info_va = 0xf100000000000000;
-	d->breakimm = 0x1000;
+	d->arch.breakimm = 0x1000;
+	ed->arch.breakimm = d->arch.breakimm;
 	// stay on kernel stack because may get interrupts!
 	// ia64_ret_from_clone (which b0 gets in new_thread) switches
 	// to user stack
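Two things are going on in the hunks above. The dom_fw.c and domain.c changes move breakimm into the arch-specific part of the domain and mirror it into each exec_domain, and the asm-offsets.c hunk exports IA64_VCPU_BREAKIMM_OFFSET and IA64_VCPU_IVA_OFFSET; together these let the break-fault handler fetch the hypercall immediate and the guest's IVA with plain loads from the pinned vcpu structure, with no calls into C, which is what the psr.ic-off fast paths require. asm-offsets.c itself is the standard kernel trick for exporting struct offsets to assembly: the file is compiled to assembly, never linked, and its DEFINE() markers are scraped (typically with sed) into a generated header. A compilable sketch of that technique, using a hypothetical stand-in struct rather than Xen's real struct exec_domain:

/*
 * Sketch of the asm-offsets technique. Build with "gcc -S offsets-demo.c";
 * each "->NAME value" marker in the .s output is then turned into
 * "#define NAME value" by a build script, so hand-written assembly can
 * index C structs by fixed offsets. This function is compiled, never run.
 */
#include <stddef.h>

struct vcpu_demo {		/* hypothetical stand-in for struct exec_domain */
	long iva;		/* guest interruption vector table base */
	int breakimm;		/* break immediate reserved for hypercalls */
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0" : : "i" (val))

void foo(void)
{
	DEFINE(DEMO_VCPU_IVA_OFFSET, offsetof(struct vcpu_demo, iva));
	DEFINE(DEMO_VCPU_BREAKIMM_OFFSET, offsetof(struct vcpu_demo, breakimm));
}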
diff -Nru a/xen/arch/ia64/hyperprivop.S b/xen/arch/ia64/hyperprivop.S
--- a/xen/arch/ia64/hyperprivop.S	2005-06-10 14:03:35 -04:00
+++ b/xen/arch/ia64/hyperprivop.S	2005-06-10 14:03:35 -04:00
@@ -62,6 +62,92 @@
 	// if not one of the above, give up for now and do it the slow way
 	br.sptk.many dispatch_break_fault ;;
 
+// reflect domain breaks directly to domain
+// FIXME: DOES NOT WORK YET
+//	r16 == cr.isr
+//	r17 == cr.iim
+//	r18 == XSI_PSR_IC
+//	r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+//	r22 == IA64_KR(CURRENT)+IA64_VCPU_BREAKIMM_OFFSET
+//	r31 == pr
+GLOBAL_ENTRY(fast_break_reflect)
+	mov r20=cr.ipsr;;
+	// if big-endian domain or privileged-perfmon bits set, do slow way
+	extr.u r21=r20,IA64_PSR_BE_BIT,1 ;;
+	cmp.ne p7,p0=r21,r0
+(p7)	br.sptk.many dispatch_break_fault ;;
+	extr.u r21=r20,IA64_PSR_PP_BIT,1 ;;
+	cmp.ne p7,p0=r21,r0
+(p7)	br.sptk.many dispatch_break_fault ;;
+	// ensure ipsr.cpl==2, ipsr.ri==0
+	// FIXME: any other psr bits need to be properly set/validated?
+	//   ...see process.c: DELIVER_PSR_CLR/SET
+	extr.u r21=r20,IA64_PSR_CPL0_BIT,2;;
+	extr.u r23=r20,IA64_PSR_RI_BIT,2;;
+	dep r20=-1,r20,IA64_PSR_CPL1_BIT,1 ;;
+	dep r20=0,r20,IA64_PSR_CPL0_BIT,1 ;;
+	dep r20=0,r20,IA64_PSR_RI_BIT,2 ;;
+	mov cr.ipsr=r20;;
+	// save ipsr in shared_info, vipsr.cpl==(ipsr.cpl==3)?3:0
+	cmp.ne p7,p0=3,r21;;
+(p7)	mov r21=r0
+	dep r20=r21,r20,IA64_PSR_CPL0_BIT,2 ;;
+	dep r20=r23,r20,IA64_PSR_RI_BIT,2 ;;
+	// vipsr.i=vpsr.i
+	adds r21=XSI_PSR_I_OFS-XSI_PSR_IC_OFS,r18 ;;
+	ld4 r21=[r21];;
+	dep r20=r21,r20,IA64_PSR_I_BIT,1 ;;
+	adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+	// FIXME: any other vpsr bits need to be properly set/validated?
+	st8 [r21]=r20;;
+	// save iim in shared_info
+	adds r21=XSI_IIM_OFS-XSI_PSR_IC_OFS,r18 ;;
+	st8 [r21]=r17;;
+	// save iip in shared_info
+	mov r20=cr.iip;;
+	adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
+	st8 [r21]=r20;;
+	// save ifs in shared_info
+	adds r21=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
+	st4 [r21]=r0 ;;
+	adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18
+	st8 [r21]=r0 ;;
+	cover ;;
+	mov r20=cr.ifs;;
+	adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
+	st8 [r21]=r20;;
+	// vpsr.i = vpsr.ic = 0 on delivery of interruption
+	st8 [r18]=r0;;
+	// FIXME: need to save iipa and isr to be arch-compliant
+	// set iip to go to domain IVA break instruction vector
+	adds r22=IA64_VCPU_IVA_OFFSET-IA64_VCPU_BREAKIMM_OFFSET,r22;;
+	ld8 r23=[r22];;
+	movl r24=0x2c00;;
+	add r24=r24,r23;;
+	mov cr.iip=r24;;
+	// OK, now all set to go except for switch to virtual bank0
+	mov r30=r2; mov r29=r3;;
+	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
+	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
+	bsw.1;;
+	st8 [r2]=r16,16; st8 [r3]=r17,16 ;;
+	st8 [r2]=r18,16; st8 [r3]=r19,16 ;;
+	st8 [r2]=r20,16; st8 [r3]=r21,16 ;;
+	st8 [r2]=r22,16; st8 [r3]=r23,16 ;;
+	st8 [r2]=r24,16; st8 [r3]=r25,16 ;;
+	st8 [r2]=r26,16; st8 [r3]=r27,16 ;;
+	st8 [r2]=r28,16; st8 [r3]=r29,16 ;;
+	st8 [r2]=r30,16; st8 [r3]=r31,16 ;;
+	movl r31=XSI_IPSR;;
+	bsw.0 ;;
+	mov r2=r30; mov r3=r29;;
+	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
+	st4 [r20]=r0 ;;
+	mov pr=r31,-1 ;;
+	rfi
+	;;
+
+
 // ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
 ENTRY(hyper_rfi)
 #define FAST_HYPERPRIVOP_CNT
@@ -112,8 +198,6 @@
 	ld8 r20=[r20];;
 	dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
 	mov cr.ifs=r20 ;;
-// TODO: increment a counter so we can count how many rfi's go the fast way
-// but where? counter must be pinned
 	// ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
 	dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
 	// vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
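The ivt.S hunk below rewires the break_fault vector around these pieces. Rendered as C, the new dispatch reads roughly as follows; this is an illustrative sketch, not Xen code. The function names are the real branch targets, stubbed here so the sketch compiles, and the tests appear in the same order as in the assembly:

#include <stdio.h>

/* Stubs for the real branch targets; they exist only so this compiles. */
void dispatch_privop_fault(void) { puts("privop fault (pseudo-cover)"); }
void fast_hyperprivop(void)      { puts("fast hyperprivop path"); }
void dispatch_break_fault(void)  { puts("slow path through C"); }
void fast_break_reflect(void)    { puts("fast reflect to guest"); }

/* What the rewritten break_fault vector does, in C terms. */
void break_fault(unsigned long iim, unsigned int vpsr_ic,
                 unsigned long vcpu_breakimm)
{
	if (iim == 0)
		dispatch_privop_fault();   /* a pseudo-cover, not a real break */
	else if (vpsr_ic == 0)
		fast_hyperprivop();        /* psr.ic off: by convention a hyperprivop */
	else if (iim == vcpu_breakimm)
		dispatch_break_fault();    /* the Xen-reserved immediate: a hypercall */
	else
#ifdef FAST_BREAK                          /* still commented out in this changeset */
		fast_break_reflect();      /* reflect the break straight to the guest */
#else
		dispatch_break_fault();    /* for now, reflect via the C handler */
#endif
}

int main(void)
{
	break_fault(0x1000, 1, 0x1000);	  /* looks like a hypercall */
	break_fault(0x100000, 1, 0x1000); /* e.g. a guest Linux syscall break */
	return 0;
}

Since FAST_BREAK stays disabled, matching the DOES NOT WORK YET marker on fast_break_reflect above, every reflected break still goes through the C handler for now, which is presumably where the new reflection counters in process.c are bumped.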
diff -Nru a/xen/arch/ia64/ivt.S b/xen/arch/ia64/ivt.S
--- a/xen/arch/ia64/ivt.S	2005-06-10 14:03:35 -04:00
+++ b/xen/arch/ia64/ivt.S	2005-06-10 14:03:35 -04:00
@@ -783,20 +783,26 @@
 	ld8 r19=[r18]
 	;;
 	cmp.eq p7,p0=r0,r17		// is this a psuedo-cover?
-(p7)	br.sptk.many dispatch_privop_fault
+(p7)	br.spnt.many dispatch_privop_fault
 	;;
-	cmp4.ne p7,p0=r0,r19
-(p7)	br.sptk.many dispatch_break_fault
-	// If we get to here, we have a hyperprivop
-	// For now, hyperprivops are handled through the break mechanism
-	// Later, they will be fast hand-coded assembly with psr.ic off
+	// if vpsr.ic is off, we have a hyperprivop
+	// A hyperprivop is hand-coded assembly with psr.ic off
 	// which means no calls, no use of r1-r15 and no memory accesses
 	// except to pinned addresses!
-#define FAST_HYPERPRIVOPS
-#ifdef FAST_HYPERPRIVOPS
-	br.sptk.many fast_hyperprivop
+	cmp4.eq p7,p0=r0,r19
+(p7)	br.sptk.many fast_hyperprivop
+	;;
+	mov r22=IA64_KR(CURRENT);;
+	adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
+	ld4 r23=[r22];;
+	cmp4.eq p6,p7=r23,r17		// Xen-reserved breakimm?
+(p6)	br.spnt.many dispatch_break_fault
+	;;
+//#define FAST_BREAK
+#ifdef FAST_BREAK
+	br.sptk.many fast_break_reflect
 #else
-	br.sptk.many dispatch_break_fault
+	br.spnt.many dispatch_break_fault
 #endif
 	;;
 #endif

diff -Nru a/xen/arch/ia64/privop.c b/xen/arch/ia64/privop.c
--- a/xen/arch/ia64/privop.c	2005-06-10 14:03:35 -04:00
+++ b/xen/arch/ia64/privop.c	2005-06-10 14:03:35 -04:00
@@ -987,14 +987,14 @@
 	char *s = buf;
 	unsigned long total = 0;
 	for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += slow_hyperpriv_cnt[i];
-	s += sprintf(s,"Slow hyperprivops (total %d:\n",total);
+	s += sprintf(s,"Slow hyperprivops (total %d):\n",total);
 	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
 		if (slow_hyperpriv_cnt[i])
 			s += sprintf(s,"%10d %s\n",
 				slow_hyperpriv_cnt[i], hyperpriv_str[i]);
 	total = 0;
 	for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += fast_hyperpriv_cnt[i];
-	s += sprintf(s,"Fast hyperprivops (total %d:\n",total);
+	s += sprintf(s,"Fast hyperprivops (total %d):\n",total);
 	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
 		if (fast_hyperpriv_cnt[i])
 			s += sprintf(s,"%10d %s\n",
@@ -1016,6 +1016,7 @@
 	int n = dump_privop_counts(buf);
 
 	n += dump_hyperprivop_counts(buf + n);
+	n += dump_reflect_counts(buf + n);
 #ifdef PRIVOP_ADDR_COUNT
 	n += dump_privop_addrs(buf + n);
 #endif
@@ -1033,6 +1034,7 @@
 #ifdef PRIVOP_ADDR_COUNT
 	zero_privop_addrs();
 #endif
+	zero_reflect_counts();
 	if (len < TMPBUFLEN) return -1;
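The process.c and include/asm-ia64/domain.h hunks counted in the diffstat are not reproduced in this message, so the definitions of dump_reflect_counts() and zero_reflect_counts() wired in above are not visible here. Judging only from those call sites and the dump conventions, one plausible shape for the reflection counters is sketched below; the array name, its size, and the per-vector indexing are all guesses, invented purely for illustration:

#include <stdio.h>
#include <string.h>

/* Guessed shape: one counter per 256-byte slot of the 32KB ia64 IVT,
 * indexed by vector offset >> 8. The real code is in the process.c
 * hunk that this message does not show. */
static unsigned long reflect_cnt[0x80];

void count_reflection(unsigned long vector)	/* hypothetical helper */
{
	reflect_cnt[(vector >> 8) & 0x7f]++;
}

int dump_reflect_counts(char *buf)
{
	char *s = buf;
	int i;

	s += sprintf(s, "Reflections by vector:\n");
	for (i = 0; i < 0x80; i++)
		if (reflect_cnt[i])
			s += sprintf(s, "%10lu 0x%04x\n", reflect_cnt[i], i << 8);
	return s - buf;
}

void zero_reflect_counts(void)
{
	memset(reflect_cnt, 0, sizeof reflect_cnt);
}

int main(void)
{
	char buf[4096];

	count_reflection(0x2c00);	/* two break faults reflected to the guest */
	count_reflection(0x2c00);
	count_reflection(0x5100);	/* sample data: some other vector */
	dump_reflect_counts(buf);
	fputs(buf, stdout);
	zero_reflect_counts();		/* reset before measuring a new workload */
	return 0;
}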
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog