
Re: [Xen-devel] [PATCH v10 15/20] x86/VPMU: Handle PMU interrupts for PV guests



>>> On 04.09.14 at 05:41, <boris.ostrovsky@xxxxxxxxxx> wrote:
>  int vpmu_do_interrupt(struct cpu_user_regs *regs)
>  {
> -    struct vcpu *v = current;
> -    struct vpmu_struct *vpmu = vcpu_vpmu(v);
> +    struct vcpu *sampled = current, *sampling;
> +    struct vpmu_struct *vpmu;
> +
> +    /* dom0 will handle interrupt for special domains (e.g. idle domain) */
> +    if ( sampled->domain->domain_id >= DOMID_FIRST_RESERVED )
> +    {
> +        sampling = choose_hwdom_vcpu();
> +        if ( !sampling )
> +            return 0;
> +    }
> +    else
> +        sampling = sampled;
> +
> +    vpmu = vcpu_vpmu(sampling);
> +    if ( !is_hvm_domain(sampling->domain) )
> +    {
> +        /* PV(H) guest or dom0 is doing system profiling */
> +        const struct cpu_user_regs *gregs;
> +
> +        if ( !vpmu->xenpmu_data )
> +            return 0;
> +
> +        if ( vpmu->xenpmu_data->pmu_flags & PMU_CACHED )
> +            return 1;
> +
> +        if ( is_pvh_domain(sampled->domain) &&
> +             !vpmu->arch_vpmu_ops->do_interrupt(regs) )
> +            return 0;
> +
> +        /* PV guest will be reading PMU MSRs from xenpmu_data */
> +        vpmu_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
> +        vpmu->arch_vpmu_ops->arch_vpmu_save(sampling);
> +        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
> +
> +        /* Store appropriate registers in xenpmu_data */
> +        if ( is_pv_32bit_domain(sampled->domain) )
> +        {
> +            /*
> +             * 32-bit dom0 cannot process Xen's addresses (which are 64 bit)
> +             * and therefore we treat it the same way as a non-privileged
> +             * PV 32-bit domain.
> +             */
> +            struct compat_cpu_user_regs *cmp;
> +
> +            gregs = guest_cpu_user_regs();
> +
> +            cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs;
> +            XLAT_cpu_user_regs(cmp, gregs);
> +        }
> +        else if ( !is_hardware_domain(sampled->domain) &&
> +                  !is_idle_vcpu(sampled) )
> +        {
> +            /* PV(H) guest */
> +            gregs = guest_cpu_user_regs();
> +            vpmu->xenpmu_data->pmu.r.regs = *gregs;
> +        }
> +        else
> +            vpmu->xenpmu_data->pmu.r.regs = *regs;
> +
> +        vpmu->xenpmu_data->domain_id = sampled->domain->domain_id;
> +        vpmu->xenpmu_data->vcpu_id = sampled->vcpu_id;
> +        vpmu->xenpmu_data->pcpu_id = smp_processor_id();
> +
> +        vpmu->xenpmu_data->pmu_flags |= PMU_CACHED;
> +        apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc | APIC_LVT_MASKED);
> +        vpmu->hw_lapic_lvtpc |= APIC_LVT_MASKED;

Would it be wrong to do the |= first and thus avoid doing the
same operation twice?
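
For illustration, the reordering being asked about would look roughly like
this (a sketch only, assuming nothing else consumes hw_lapic_lvtpc between
the two statements):

    /* Set the mask bit once, then write the already-masked value. */
    vpmu->hw_lapic_lvtpc |= APIC_LVT_MASKED;
    apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);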

> @@ -230,7 +353,9 @@ void vpmu_load(struct vcpu *v)
>      local_irq_enable();
>  
>      /* Only when PMU is counting, we load PMU context immediately. */
> -    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) )
> +    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) ||
> +         (!is_hvm_domain(v->domain) &&
> +          vpmu->xenpmu_data->pmu_flags & PMU_CACHED) )

Please parenthesize & within &&.
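
I.e. something along these lines (only illustrating the requested
parenthesization of the & expression):

    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) ||
         (!is_hvm_domain(v->domain) &&
          (vpmu->xenpmu_data->pmu_flags & PMU_CACHED)) )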

Jan

