Re: [Xen-devel] [PATCH v17 07/10] x86: enable CMT for each domain RMID

On 29/09/14 11:40, Chao Peng wrote:
> If the CMT service is attached to a domain, the domain's associated
> RMID is programmed into the hardware for monitoring whenever one of
> its vcpus is scheduled in. When the vcpu is scheduled out, RMID 0
> (reserved for the system) is programmed instead.
>
> Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
> Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
> Acked-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
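
For anyone following along: the psr_cmt->rmid_mask used below is
derived from the maximum RMID enumerated by CPUID leaf 0xF in the
earlier patches of this series.  A minimal sketch of that derivation
(get_count_order() is Xen's bitops helper; treat the exact code as
illustrative rather than the series' verbatim init path):

    /*
     * Enumerate CMT: CPUID.(EAX=0xF,ECX=0):EBX reports the highest
     * RMID supported by the processor (zero-based).
     */
    unsigned int eax, ebx, ecx, edx;
    uint64_t rmid_mask;

    cpuid_count(0xf, 0, &eax, &ebx, &ecx, &edx);

    /*
     * Mask covering every bit needed to encode RMIDs 0..ebx in
     * IA32_PQR_ASSOC bits 9:0.
     */
    rmid_mask = ~(~0ull << get_count_order(ebx + 1));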

> ---
>  xen/arch/x86/domain.c           |    5 +++++
>  xen/arch/x86/psr.c              |   27 +++++++++++++++++++++++++++
>  xen/include/asm-x86/msr-index.h |    3 +++
>  xen/include/asm-x86/psr.h       |    1 +
>  4 files changed, 36 insertions(+)
>
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index 3cfd8f4..04a6719 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -1418,6 +1418,8 @@ static void __context_switch(void)
>      {
>          memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
>          vcpu_save_fpu(p);
> +        if ( psr_cmt_enabled() )
> +            psr_assoc_rmid(0);
>          p->arch.ctxt_switch_from(p);
>      }
>  
> @@ -1442,6 +1444,9 @@ static void __context_switch(void)
>          }
>          vcpu_restore_fpu_eager(n);
>          n->arch.ctxt_switch_to(n);
> +
> +        if ( psr_cmt_enabled() && n->domain->arch.psr_rmid > 0 )
> +            psr_assoc_rmid(n->domain->arch.psr_rmid);
>      }
>  
>      gdt = !is_pv_32on64_vcpu(n) ? per_cpu(gdt_table, cpu) :
> diff --git a/xen/arch/x86/psr.c b/xen/arch/x86/psr.c
> index 4b4a4cf..c5ee16e 100644
> --- a/xen/arch/x86/psr.c
> +++ b/xen/arch/x86/psr.c
> @@ -20,9 +20,15 @@
>  
>  #define PSR_CMT        (1<<0)
>  
> +struct pqr_assoc {
> +    uint64_t val;
> +    bool_t initialized;
> +};
> +
>  struct psr_cmt *__read_mostly psr_cmt;
>  static bool_t __initdata opt_psr;
>  static unsigned int __initdata opt_rmid_max = 255;
> +static DEFINE_PER_CPU(struct pqr_assoc, pqr_assoc);
>  
>  static void __init parse_psr_param(char *s)
>  {
> @@ -149,6 +155,27 @@ void psr_free_rmid(struct domain *d)
>      d->arch.psr_rmid = 0;
>  }
>  
> +void psr_assoc_rmid(unsigned int rmid)
> +{
> +    uint64_t val;
> +    uint64_t new_val;
> +    struct pqr_assoc *pqr = &this_cpu(pqr_assoc);
> +
> +    if ( !pqr->initialized )
> +    {
> +        rdmsrl(MSR_IA32_PQR_ASSOC, pqr->val);
> +        pqr->initialized = 1;
> +    }
> +    val = pqr->val;
> +
> +    new_val = (val & ~psr_cmt->rmid_mask) | (rmid & psr_cmt->rmid_mask);
> +    if ( val != new_val )
> +    {
> +        wrmsrl(MSR_IA32_PQR_ASSOC, new_val);
> +        pqr->val = new_val;
> +    }
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
> index 542222e..dcb2b87 100644
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -323,6 +323,9 @@
>  #define MSR_IA32_TSC_DEADLINE                0x000006E0
>  #define MSR_IA32_ENERGY_PERF_BIAS    0x000001b0
>  
> +/* Platform Shared Resource MSRs */
> +#define MSR_IA32_PQR_ASSOC           0x00000c8f
> +
>  /* Intel Model 6 */
>  #define MSR_P6_PERFCTR(n)            (0x000000c1 + (n))
>  #define MSR_P6_EVNTSEL(n)            (0x00000186 + (n))
> diff --git a/xen/include/asm-x86/psr.h b/xen/include/asm-x86/psr.h
> index 98cfa6d..b544306 100644
> --- a/xen/include/asm-x86/psr.h
> +++ b/xen/include/asm-x86/psr.h
> @@ -47,6 +47,7 @@ static inline bool_t psr_cmt_enabled(void)
>  
>  int psr_alloc_rmid(struct domain *d);
>  void psr_free_rmid(struct domain *d);
> +void psr_assoc_rmid(unsigned int rmid);
>  
>  #endif /* __ASM_PSR_H__ */
>  

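For completeness: once a domain's RMID is live in IA32_PQR_ASSOC, its
L3 occupancy is read back through the IA32_QM_EVTSEL/IA32_QM_CTR pair
(architectural MSRs 0xc8d/0xc8e); the read-out path is added elsewhere
in this series.  A minimal sketch of such a read -- the helper name and
the way the upscaling factor is passed in are illustrative only:

    #define MSR_IA32_QM_EVTSEL    0x00000c8d
    #define MSR_IA32_QM_CTR       0x00000c8e
    #define QM_EVTID_L3_OCCUPANCY 1   /* event ID 1: L3 occupancy */

    /*
     * Read L3 occupancy in bytes for @rmid.  @upscale is the
     * counter-to-bytes conversion factor from
     * CPUID.(EAX=0xF,ECX=1):EBX.  Returns 0 if the hardware flags
     * the reading as erroneous or unavailable.
     */
    static uint64_t read_l3_occupancy(unsigned int rmid, uint64_t upscale)
    {
        uint64_t ctr;

        /* RMID goes in bits 41:32, event ID in bits 7:0. */
        wrmsrl(MSR_IA32_QM_EVTSEL,
               ((uint64_t)rmid << 32) | QM_EVTID_L3_OCCUPANCY);
        rdmsrl(MSR_IA32_QM_CTR, ctr);

        /* Bit 63 = Error, bit 62 = Unavailable. */
        if ( ctr & (3ull << 62) )
            return 0;

        return (ctr & ((1ull << 62) - 1)) * upscale;
    }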