
Re: [Xen-devel] [PATCH v2 1/5] xentrace: add TRC_HVM_PI_LIST_ADD



> From: Gao, Chao
> Sent: Thursday, May 11, 2017 2:04 PM
> 
> This patch adds TRC_HVM_PI_LIST_ADD to track adding one entry to
> the per-pcpu blocking list. Also introduce a 'counter' to track
> the number of entries in the list.
> 
> Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
> ---
>  tools/xentrace/formats          |  1 +
>  xen/arch/x86/hvm/vmx/vmx.c      | 12 +++++++++++-
>  xen/include/asm-x86/hvm/trace.h |  1 +
>  xen/include/public/trace.h      |  1 +
>  4 files changed, 14 insertions(+), 1 deletion(-)
> 
> diff --git a/tools/xentrace/formats b/tools/xentrace/formats
> index 8b31780..999ca8c 100644
> --- a/tools/xentrace/formats
> +++ b/tools/xentrace/formats
> @@ -125,6 +125,7 @@
>  0x00082020  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  INTR_WINDOW [ value = 0x%(1)08x ]
>  0x00082021  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  NPF         [ gpa = 0x%(2)08x%(1)08x mfn = 0x%(4)08x%(3)08x qual = 0x%(5)04x p2mt = 0x%(6)04x ]
>  0x00082023  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  TRAP        [ vector = 0x%(1)02x ]
> +0x00082026  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PI_LIST_ADD [ domid = 0x%(1)04x vcpu = 0x%(2)04x, pcpu = 0x%(3)04x, #entry = 0x%(4)04x ]
> 
>  0x0010f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_map      [ domid = %(1)d ]
>  0x0010f002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_unmap    [ domid = %(1)d ]
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index c8ef18a..efff6cd 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -82,6 +82,7 @@ static int vmx_vmfunc_intercept(struct cpu_user_regs *regs);
>  struct vmx_pi_blocking_vcpu {
>      struct list_head     list;
>      spinlock_t           lock;
> +    atomic_t             counter;
>  };
> 
>  /*
> @@ -119,6 +120,9 @@ static void vmx_vcpu_block(struct vcpu *v)
>       */
>      ASSERT(old_lock == NULL);
> 
> +    atomic_inc(&per_cpu(vmx_pi_blocking, v->processor).counter);
> +    HVMTRACE_4D(PI_LIST_ADD, v->domain->domain_id, v->vcpu_id, v->processor,
> +                atomic_read(&per_cpu(vmx_pi_blocking, v->processor).counter));
>      list_add_tail(&v->arch.hvm_vmx.pi_blocking.list,
>                    &per_cpu(vmx_pi_blocking, v->processor).list);
>      spin_unlock_irqrestore(pi_blocking_list_lock, flags);
> @@ -186,6 +190,8 @@ static void vmx_pi_unblock_vcpu(struct vcpu *v)
>      {
>          ASSERT(v->arch.hvm_vmx.pi_blocking.lock == pi_blocking_list_lock);
>          list_del(&v->arch.hvm_vmx.pi_blocking.list);
> +        atomic_dec(&container_of(pi_blocking_list_lock,
> +                                 struct vmx_pi_blocking_vcpu, lock)->counter);
>          v->arch.hvm_vmx.pi_blocking.lock = NULL;
>      }
> 
> @@ -234,6 +240,7 @@ void vmx_pi_desc_fixup(unsigned int cpu)
>          if ( pi_test_on(&vmx->pi_desc) )
>          {
>              list_del(&vmx->pi_blocking.list);
> +            atomic_dec(&per_cpu(vmx_pi_blocking, cpu).counter);
>              vmx->pi_blocking.lock = NULL;
>              vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
>          }
> @@ -258,6 +265,8 @@ void vmx_pi_desc_fixup(unsigned int cpu)
> 
>              list_move(&vmx->pi_blocking.list,
>                        &per_cpu(vmx_pi_blocking, new_cpu).list);
> +            atomic_dec(&per_cpu(vmx_pi_blocking, cpu).counter);
> +            atomic_inc(&per_cpu(vmx_pi_blocking, new_cpu).counter);

Don't you also need a trace here?

And from a completeness point of view, is it useful to trace both the dec and the inc? A rough sketch of what I mean follows.
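Purely for illustration (this is only a sketch, not something the patch has to adopt verbatim -- the DEL event name, the 0x27 slot and the exact call site below are placeholders of mine):

    /* xen/include/public/trace.h -- assuming the 0x27 slot is still free */
    #define TRC_HVM_PI_LIST_DEL      (TRC_HVM_HANDLER + 0x27)

    /* xen/include/asm-x86/hvm/trace.h */
    #define DO_TRC_HVM_PI_LIST_DEL   DEFAULT_HVM_MISC

    /* tools/xentrace/formats */
    0x00082027  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PI_LIST_DEL [ domid = 0x%(1)04x vcpu = 0x%(2)04x, pcpu = 0x%(3)04x, #entry = 0x%(4)04x ]

    /* xen/arch/x86/hvm/vmx/vmx.c: e.g. in pi_wakeup_interrupt(), right after
     * the list_del()/atomic_dec() pair; the other removal paths (and the
     * list_move() above) would mirror this. */
    struct vcpu *woken = container_of(vmx, struct vcpu, arch.hvm_vmx);

    HVMTRACE_4D(PI_LIST_DEL, woken->domain->domain_id, woken->vcpu_id,
                smp_processor_id(),
                atomic_read(&per_cpu(vmx_pi_blocking, smp_processor_id()).counter));

With both sides traced, xenalyze (or a simple script over the raw trace) could reconstruct the per-pcpu list length over time instead of only seeing it grow.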

>              vmx->pi_blocking.lock = new_lock;
> 
>              spin_unlock(new_lock);
> @@ -2360,7 +2369,7 @@ static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
>      struct arch_vmx_struct *vmx, *tmp;
>      spinlock_t *lock = &per_cpu(vmx_pi_blocking, smp_processor_id()).lock;
>      struct list_head *blocked_vcpus =
> -             &per_cpu(vmx_pi_blocking, smp_processor_id()).list;
> +               &per_cpu(vmx_pi_blocking, smp_processor_id()).list;
> 
>      ack_APIC_irq();
>      this_cpu(irq_count)++;
> @@ -2377,6 +2386,7 @@ static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
>          if ( pi_test_on(&vmx->pi_desc) )
>          {
>              list_del(&vmx->pi_blocking.list);
> +            atomic_dec(&per_cpu(vmx_pi_blocking, smp_processor_id()).counter);
>              ASSERT(vmx->pi_blocking.lock == lock);
>              vmx->pi_blocking.lock = NULL;
>              vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
> diff --git a/xen/include/asm-x86/hvm/trace.h b/xen/include/asm-x86/hvm/trace.h
> index de802a6..b74ffdd 100644
> --- a/xen/include/asm-x86/hvm/trace.h
> +++ b/xen/include/asm-x86/hvm/trace.h
> @@ -54,6 +54,7 @@
>  #define DO_TRC_HVM_TRAP             DEFAULT_HVM_MISC
>  #define DO_TRC_HVM_TRAP_DEBUG       DEFAULT_HVM_MISC
>  #define DO_TRC_HVM_VLAPIC           DEFAULT_HVM_MISC
> +#define DO_TRC_HVM_PI_LIST_ADD      DEFAULT_HVM_MISC
> 
> 
>  #define TRC_PAR_LONG(par) ((par)&0xFFFFFFFF),((par)>>32)
> diff --git a/xen/include/public/trace.h b/xen/include/public/trace.h
> index 7f2e891..c716d57 100644
> --- a/xen/include/public/trace.h
> +++ b/xen/include/public/trace.h
> @@ -234,6 +234,7 @@
>  #define TRC_HVM_TRAP             (TRC_HVM_HANDLER + 0x23)
>  #define TRC_HVM_TRAP_DEBUG       (TRC_HVM_HANDLER + 0x24)
>  #define TRC_HVM_VLAPIC           (TRC_HVM_HANDLER + 0x25)
> +#define TRC_HVM_PI_LIST_ADD      (TRC_HVM_HANDLER + 0x26)
> 
>  #define TRC_HVM_IOPORT_WRITE    (TRC_HVM_HANDLER + 0x216)
>  #define TRC_HVM_IOMEM_WRITE     (TRC_HVM_HANDLER + 0x217)
> --
> 1.8.3.1

