
[Xen-changelog] [xen-unstable] [IA64] Add VTI related perfc



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 5791030e6473138d37ce346a84859d1921b47a86
# Parent  d4e85f8453ba4da55bcf1e908a862022857f915c
[IA64] Add VTI related perfc

This patch adds VTI-related and fw_hypercall performance counters.
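
Each new event follows the existing perfc pattern used elsewhere in the
tree: declare a per-CPU counter (or a counter array) in
xen/include/asm-ia64/perfc_defn.h, then bump it at the corresponding
emulation site.  A minimal sketch of the two forms used by this patch:

    /* perfc_defn.h: a per-CPU scalar and an indexed array */
    PERFCOUNTER_CPU(vmx_rsm,        "vmx privop rsm")
    PERFCOUNTER_ARRAY(fw_hypercall, "fw_hypercall", 0x20)

    /* at the event site */
    perfc_incrc(vmx_rsm);                   /* increment the scalar counter   */
    perfc_incra(fw_hypercall, index >> 8);  /* increment one slot of an array */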

Signed-off-by: Atsushi SAKAI <sakaia@xxxxxxxxxxxxxx>
Signed-off-by: Hiroya INAKOSHI <inakoshi.hiroya@xxxxxxxxxxxxxx>
---
 xen/arch/ia64/vmx/mmio.c          |    1 
 xen/arch/ia64/vmx/pal_emul.c      |    1 
 xen/arch/ia64/vmx/vmx_interrupt.c |    1 
 xen/arch/ia64/vmx/vmx_phy_mode.c  |    1 
 xen/arch/ia64/vmx/vmx_process.c   |    1 
 xen/arch/ia64/vmx/vmx_virt.c      |   39 +++++++++++++++++++++++++++++
 xen/arch/ia64/xen/hypercall.c     |    1 
 xen/include/asm-ia64/perfc_defn.h |   50 ++++++++++++++++++++++++++++++++++++++
 8 files changed, 95 insertions(+)

diff -r d4e85f8453ba -r 5791030e6473 xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Sun Sep 10 14:26:27 2006 -0600
+++ b/xen/arch/ia64/vmx/mmio.c  Sun Sep 10 14:31:54 2006 -0600
@@ -213,6 +213,7 @@ static void mmio_access(VCPU *vcpu, u64 
     iot=__gpfn_is_io(vcpu->domain, src_pa>>PAGE_SHIFT);
     v_plat = vmx_vcpu_get_plat(vcpu);
 
+    perfc_incra(vmx_mmio_access, iot >> 56);
     switch (iot) {
     case GPFN_PIB:
         if(!dir)
diff -r d4e85f8453ba -r 5791030e6473 xen/arch/ia64/vmx/pal_emul.c
--- a/xen/arch/ia64/vmx/pal_emul.c      Sun Sep 10 14:26:27 2006 -0600
+++ b/xen/arch/ia64/vmx/pal_emul.c      Sun Sep 10 14:31:54 2006 -0600
@@ -389,6 +389,7 @@ pal_emul(VCPU *vcpu) {
 
        vcpu_get_gr_nat(vcpu,28,&gr28);  //bank1
 
+       perfc_incrc(vmx_pal_emul);
        switch (gr28) {
                case PAL_CACHE_FLUSH:
                        result = pal_cache_flush(vcpu);
diff -r d4e85f8453ba -r 5791030e6473 xen/arch/ia64/vmx/vmx_interrupt.c
--- a/xen/arch/ia64/vmx/vmx_interrupt.c Sun Sep 10 14:26:27 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c Sun Sep 10 14:31:54 2006 -0600
@@ -92,6 +92,7 @@ inject_guest_interruption(VCPU *vcpu, u6
     u64 viva;
     REGS *regs;
     ISR pt_isr;
+    perfc_incra(vmx_inject_guest_interruption, vec >> 8);
     regs=vcpu_regs(vcpu);
     // clear cr.isr.ri 
     pt_isr.val = VMX(vcpu,cr_isr);
diff -r d4e85f8453ba -r 5791030e6473 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Sun Sep 10 14:26:27 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Sun Sep 10 14:31:54 2006 -0600
@@ -262,6 +262,7 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
     int act;
     REGS * regs=vcpu_regs(vcpu);
     act = mm_switch_action(old_psr, new_psr);
+    perfc_incra(vmx_switch_mm_mode, act);
     switch (act) {
     case SW_V2P:
 //        printf("V -> P mode transition: (0x%lx -> 0x%lx)\n",
diff -r d4e85f8453ba -r 5791030e6473 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Sun Sep 10 14:26:27 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c   Sun Sep 10 14:31:54 2006 -0600
@@ -115,6 +115,7 @@ vmx_ia64_handle_break (unsigned long ifa
     struct domain *d = current->domain;
     struct vcpu *v = current;
 
+    perfc_incrc(vmx_ia64_handle_break);
 #ifdef CRASH_DEBUG
     if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
         IS_VMM_ADDRESS(regs->cr_iip)) {
diff -r d4e85f8453ba -r 5791030e6473 xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Sun Sep 10 14:26:27 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Sun Sep 10 14:31:54 2006 -0600
@@ -1398,120 +1398,159 @@ if ( (cause == 0xff && opcode == 0x1e000
 
     switch(cause) {
     case EVENT_RSM:
+        perfc_incrc(vmx_rsm);
         status=vmx_emul_rsm(vcpu, inst);
         break;
     case EVENT_SSM:
+        perfc_incrc(vmx_ssm);
         status=vmx_emul_ssm(vcpu, inst);
         break;
     case EVENT_MOV_TO_PSR:
+        perfc_incrc(vmx_mov_to_psr);
         status=vmx_emul_mov_to_psr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_PSR:
+        perfc_incrc(vmx_mov_from_psr);
         status=vmx_emul_mov_from_psr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_CR:
+        perfc_incrc(vmx_mov_from_cr);
         status=vmx_emul_mov_from_cr(vcpu, inst);
         break;
     case EVENT_MOV_TO_CR:
+        perfc_incrc(vmx_mov_to_cr);
         status=vmx_emul_mov_to_cr(vcpu, inst);
         break;
     case EVENT_BSW_0:
+        perfc_incrc(vmx_bsw0);
         status=vmx_emul_bsw0(vcpu, inst);
         break;
     case EVENT_BSW_1:
+        perfc_incrc(vmx_bsw1);
         status=vmx_emul_bsw1(vcpu, inst);
         break;
     case EVENT_COVER:
+        perfc_incrc(vmx_cover);
         status=vmx_emul_cover(vcpu, inst);
         break;
     case EVENT_RFI:
+        perfc_incrc(vmx_rfi);
         status=vmx_emul_rfi(vcpu, inst);
         break;
     case EVENT_ITR_D:
+        perfc_incrc(vmx_itr_d);
         status=vmx_emul_itr_d(vcpu, inst);
         break;
     case EVENT_ITR_I:
+        perfc_incrc(vmx_itr_i);
         status=vmx_emul_itr_i(vcpu, inst);
         break;
     case EVENT_PTR_D:
+        perfc_incrc(vmx_ptr_d);
         status=vmx_emul_ptr_d(vcpu, inst);
         break;
     case EVENT_PTR_I:
+        perfc_incrc(vmx_ptr_i);
         status=vmx_emul_ptr_i(vcpu, inst);
         break;
     case EVENT_ITC_D:
+        perfc_incrc(vmx_itc_d);
         status=vmx_emul_itc_d(vcpu, inst);
         break;
     case EVENT_ITC_I:
+        perfc_incrc(vmx_itc_i);
         status=vmx_emul_itc_i(vcpu, inst);
         break;
     case EVENT_PTC_L:
+        perfc_incrc(vmx_ptc_l);
         status=vmx_emul_ptc_l(vcpu, inst);
         break;
     case EVENT_PTC_G:
+        perfc_incrc(vmx_ptc_g);
         status=vmx_emul_ptc_g(vcpu, inst);
         break;
     case EVENT_PTC_GA:
+        perfc_incrc(vmx_ptc_ga);
         status=vmx_emul_ptc_ga(vcpu, inst);
         break;
     case EVENT_PTC_E:
+        perfc_incrc(vmx_ptc_e);
         status=vmx_emul_ptc_e(vcpu, inst);
         break;
     case EVENT_MOV_TO_RR:
+        perfc_incrc(vmx_mov_to_rr);
         status=vmx_emul_mov_to_rr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_RR:
+        perfc_incrc(vmx_mov_from_rr);
         status=vmx_emul_mov_from_rr(vcpu, inst);
         break;
     case EVENT_THASH:
+        perfc_incrc(vmx_thash);
         status=vmx_emul_thash(vcpu, inst);
         break;
     case EVENT_TTAG:
+        perfc_incrc(vmx_ttag);
         status=vmx_emul_ttag(vcpu, inst);
         break;
     case EVENT_TPA:
+        perfc_incrc(vmx_tpa);
         status=vmx_emul_tpa(vcpu, inst);
         break;
     case EVENT_TAK:
+        perfc_incrc(vmx_tak);
         status=vmx_emul_tak(vcpu, inst);
         break;
     case EVENT_MOV_TO_AR_IMM:
+        perfc_incrc(vmx_mov_to_ar_imm);
         status=vmx_emul_mov_to_ar_imm(vcpu, inst);
         break;
     case EVENT_MOV_TO_AR:
+        perfc_incrc(vmx_mov_to_ar_reg);
         status=vmx_emul_mov_to_ar_reg(vcpu, inst);
         break;
     case EVENT_MOV_FROM_AR:
+        perfc_incrc(vmx_mov_from_ar_reg);
         status=vmx_emul_mov_from_ar_reg(vcpu, inst);
         break;
     case EVENT_MOV_TO_DBR:
+        perfc_incrc(vmx_mov_to_dbr);
         status=vmx_emul_mov_to_dbr(vcpu, inst);
         break;
     case EVENT_MOV_TO_IBR:
+        perfc_incrc(vmx_mov_to_ibr);
         status=vmx_emul_mov_to_ibr(vcpu, inst);
         break;
     case EVENT_MOV_TO_PMC:
+        perfc_incrc(vmx_mov_to_pmc);
         status=vmx_emul_mov_to_pmc(vcpu, inst);
         break;
     case EVENT_MOV_TO_PMD:
+        perfc_incrc(vmx_mov_to_pmd);
         status=vmx_emul_mov_to_pmd(vcpu, inst);
         break;
     case EVENT_MOV_TO_PKR:
+        perfc_incrc(vmx_mov_to_pkr);
         status=vmx_emul_mov_to_pkr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_DBR:
+        perfc_incrc(vmx_mov_from_dbr);
         status=vmx_emul_mov_from_dbr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_IBR:
+        perfc_incrc(vmx_mov_from_ibr);
         status=vmx_emul_mov_from_ibr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_PMC:
+        perfc_incrc(vmx_mov_from_pmc);
         status=vmx_emul_mov_from_pmc(vcpu, inst);
         break;
     case EVENT_MOV_FROM_PKR:
+        perfc_incrc(vmx_mov_from_pkr);
         status=vmx_emul_mov_from_pkr(vcpu, inst);
         break;
     case EVENT_MOV_FROM_CPUID:
+        perfc_incrc(vmx_mov_from_cpuid);
         status=vmx_emul_mov_from_cpuid(vcpu, inst);
         break;
     case EVENT_VMSW:
diff -r d4e85f8453ba -r 5791030e6473 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Sun Sep 10 14:26:27 2006 -0600
+++ b/xen/arch/ia64/xen/hypercall.c     Sun Sep 10 14:31:54 2006 -0600
@@ -211,6 +211,7 @@ fw_hypercall (struct pt_regs *regs)
        IA64FAULT fault; 
        unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;
 
+       perfc_incra(fw_hypercall, index >> 8);
        switch (index) {
            case FW_HYPERCALL_PAL_CALL:
                //printf("*** PAL hypercall: index=%d\n",regs->r28);
diff -r d4e85f8453ba -r 5791030e6473 xen/include/asm-ia64/perfc_defn.h
--- a/xen/include/asm-ia64/perfc_defn.h Sun Sep 10 14:26:27 2006 -0600
+++ b/xen/include/asm-ia64/perfc_defn.h Sun Sep 10 14:31:54 2006 -0600
@@ -35,6 +35,48 @@ PERFCOUNTER_ARRAY(mov_from_cr,        "p
 
 PERFCOUNTER_ARRAY(misc_privop,        "privop misc", 64)
 
+// privileged instructions that trap into vmx_entry
+PERFCOUNTER_CPU(vmx_rsm,              "vmx privop rsm")
+PERFCOUNTER_CPU(vmx_ssm,              "vmx privop ssm")
+PERFCOUNTER_CPU(vmx_mov_to_psr,       "vmx privop mov_to_psr")
+PERFCOUNTER_CPU(vmx_mov_from_psr,     "vmx privop mov_from_psr")
+PERFCOUNTER_CPU(vmx_mov_from_cr,      "vmx privop mov_from_cr")
+PERFCOUNTER_CPU(vmx_mov_to_cr,        "vmx privop mov_to_cr")
+PERFCOUNTER_CPU(vmx_bsw0,             "vmx privop bsw0")
+PERFCOUNTER_CPU(vmx_bsw1,             "vmx privop bsw1")
+PERFCOUNTER_CPU(vmx_cover,            "vmx privop cover")
+PERFCOUNTER_CPU(vmx_rfi,              "vmx privop rfi")
+PERFCOUNTER_CPU(vmx_itr_d,            "vmx privop itr_d")
+PERFCOUNTER_CPU(vmx_itr_i,            "vmx privop itr_i")
+PERFCOUNTER_CPU(vmx_ptr_d,            "vmx privop ptr_d")
+PERFCOUNTER_CPU(vmx_ptr_i,            "vmx privop ptr_i")
+PERFCOUNTER_CPU(vmx_itc_d,            "vmx privop itc_d")
+PERFCOUNTER_CPU(vmx_itc_i,            "vmx privop itc_i")
+PERFCOUNTER_CPU(vmx_ptc_l,            "vmx privop ptc_l")
+PERFCOUNTER_CPU(vmx_ptc_g,            "vmx privop ptc_g")
+PERFCOUNTER_CPU(vmx_ptc_ga,           "vmx privop ptc_ga")
+PERFCOUNTER_CPU(vmx_ptc_e,            "vmx privop ptc_e")
+PERFCOUNTER_CPU(vmx_mov_to_rr,        "vmx privop mov_to_rr")
+PERFCOUNTER_CPU(vmx_mov_from_rr,      "vmx privop mov_from_rr")
+PERFCOUNTER_CPU(vmx_thash,            "vmx privop thash")
+PERFCOUNTER_CPU(vmx_ttag,             "vmx privop ttag")
+PERFCOUNTER_CPU(vmx_tpa,              "vmx privop tpa")
+PERFCOUNTER_CPU(vmx_tak,              "vmx privop tak")
+PERFCOUNTER_CPU(vmx_mov_to_ar_imm,    "vmx privop mov_to_ar_imm")
+PERFCOUNTER_CPU(vmx_mov_to_ar_reg,    "vmx privop mov_to_ar_reg")
+PERFCOUNTER_CPU(vmx_mov_from_ar_reg,  "vmx privop mov_from_ar_reg")
+PERFCOUNTER_CPU(vmx_mov_to_dbr,       "vmx privop mov_to_dbr")
+PERFCOUNTER_CPU(vmx_mov_to_ibr,       "vmx privop mov_to_ibr")
+PERFCOUNTER_CPU(vmx_mov_to_pmc,       "vmx privop mov_to_pmc")
+PERFCOUNTER_CPU(vmx_mov_to_pmd,       "vmx privop mov_to_pmd")
+PERFCOUNTER_CPU(vmx_mov_to_pkr,       "vmx privop mov_to_pkr")
+PERFCOUNTER_CPU(vmx_mov_from_dbr,     "vmx privop mov_from_dbr")
+PERFCOUNTER_CPU(vmx_mov_from_ibr,     "vmx privop mov_from_ibr")
+PERFCOUNTER_CPU(vmx_mov_from_pmc,     "vmx privop mov_from_pmc")
+PERFCOUNTER_CPU(vmx_mov_from_pkr,     "vmx privop mov_from_pkr")
+PERFCOUNTER_CPU(vmx_mov_from_cpuid,   "vmx privop mov_from_cpuid")
+
+
 PERFCOUNTER_ARRAY(slow_hyperprivop,   "slow hyperprivops", HYPERPRIVOP_MAX + 1)
 PERFCOUNTER_ARRAY(fast_hyperprivop,   "fast hyperprivops", HYPERPRIVOP_MAX + 1)
 
@@ -43,6 +85,14 @@ PERFCOUNTER_ARRAY(fast_reflect,       "f
 
 PERFSTATUS(vhpt_nbr_entries,          "nbr of entries per VHPT")
 PERFSTATUS_CPU(vhpt_valid_entries,    "nbr of valid entries in VHPT")
+
+PERFCOUNTER_ARRAY(vmx_mmio_access,    "vmx_mmio_access", 8)
+PERFCOUNTER_CPU(vmx_pal_emul,         "vmx_pal_emul")
+PERFCOUNTER_ARRAY(vmx_switch_mm_mode, "vmx_switch_mm_mode", 8)
+PERFCOUNTER_CPU(vmx_ia64_handle_break,"vmx_ia64_handle_break")
+PERFCOUNTER_ARRAY(vmx_inject_guest_interruption,
+                                      "vmx_inject_guest_interruption", 0x80)
+PERFCOUNTER_ARRAY(fw_hypercall,       "fw_hypercall", 0x20)
 
 #ifdef CONFIG_PRIVOP_ADDRS
 #ifndef PERFPRIVOPADDR
