[Xen-devel] [PATCH v6 15/19] x86/VPMU: Add privileged PMU mode
Add support for privileged PMU mode, which allows the privileged domain
(dom0) to profile both itself (and the hypervisor) and the guests. While
this mode is on, profiling in guests is disabled.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Reviewed-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
Tested-by: Dietmar Hahn <dietmar.hahn@xxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vpmu.c  | 100 +++++++++++++++++++++++++++++++++--------------
 xen/arch/x86/traps.c     |  10 +++++
 xen/include/public/pmu.h |   3 ++
 3 files changed, 84 insertions(+), 29 deletions(-)

diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 896e2be..7cb2231 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -88,7 +88,9 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, uint8_t rw)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(current);
 
-    if ( !(vpmu_mode & XENPMU_MODE_ON) )
+    if ( (vpmu_mode == XENPMU_MODE_OFF) ||
+         ((vpmu_mode & XENPMU_MODE_PRIV) &&
+          !is_hardware_domain(current->domain)) )
         return 0;
 
     ASSERT((rw == VPMU_MSR_READ) || (rw == VPMU_MSR_WRITE));
@@ -128,16 +130,23 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
     struct vcpu *v = current;
     struct vpmu_struct *vpmu;
 
-    /* dom0 will handle interrupt for special domains (e.g. idle domain) */
-    if ( v->domain->domain_id >= DOMID_FIRST_RESERVED )
+    /*
+     * dom0 will handle interrupt for special domains (e.g. idle domain) or,
+     * in XENPMU_MODE_PRIV, for everyone.
+     */
+    if ( (vpmu_mode & XENPMU_MODE_PRIV) ||
+         (v->domain->domain_id >= DOMID_FIRST_RESERVED) )
         v = hardware_domain->vcpu[smp_processor_id() %
                                   hardware_domain->max_vcpus];
 
     vpmu = vcpu_vpmu(v);
-    if ( !is_hvm_domain(v->domain) )
+    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
+        return 0;
+
+    if ( !is_hvm_domain(v->domain) || (vpmu_mode & XENPMU_MODE_PRIV) )
     {
         /* PV guest or dom0 is doing system profiling */
-        const struct cpu_user_regs *gregs;
+        struct cpu_user_regs *gregs;
         int err;
 
         if ( v->arch.vpmu.xenpmu_data->pmu_flags & PMU_CACHED )
@@ -148,34 +157,62 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
         err = vpmu->arch_vpmu_ops->arch_vpmu_save(v);
         vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
 
-        /* Store appropriate registers in xenpmu_data */
-        if ( is_pv_32bit_domain(current->domain) )
+        if ( !is_hvm_domain(current->domain) )
         {
-            /*
-             * 32-bit dom0 cannot process Xen's addresses (which are 64 bit)
-             * and therefore we treat it the same way as a non-priviledged
-             * PV 32-bit domain.
-             */
-            struct compat_cpu_user_regs *cmp;
-
-            gregs = guest_cpu_user_regs();
+            /* Store appropriate registers in xenpmu_data */
+            if ( is_pv_32bit_domain(current->domain) )
+            {
+                gregs = guest_cpu_user_regs();
+
+                if ( (vpmu_mode & XENPMU_MODE_PRIV) &&
+                     !is_pv_32bit_domain(v->domain) )
+                    memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
+                           gregs, sizeof(struct cpu_user_regs));
+                else
+                {
+                    /*
+                     * 32-bit dom0 cannot process Xen's addresses (which are
+                     * 64 bit) and therefore we treat it the same way as a
+                     * non-priviledged PV 32-bit domain.
+                     */
+
+                    struct compat_cpu_user_regs *cmp;
+
+                    cmp = (struct compat_cpu_user_regs *)
+                          &v->arch.vpmu.xenpmu_data->pmu.r.regs;
+                    XLAT_cpu_user_regs(cmp, gregs);
+                    memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
+                           &cmp, sizeof(struct compat_cpu_user_regs));
+                }
+            }
+            else if ( !is_hardware_domain(current->domain) &&
+                      !is_idle_vcpu(current) )
+            {
+                /* PV guest */
+                gregs = guest_cpu_user_regs();
+                memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
+                       gregs, sizeof(struct cpu_user_regs));
+            }
+            else
+                memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
+                       regs, sizeof(struct cpu_user_regs));
 
-            cmp = (void *)&v->arch.vpmu.xenpmu_data->pmu.r.regs;
-            XLAT_cpu_user_regs(cmp, gregs);
-            memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
-                   &cmp, sizeof(struct compat_cpu_user_regs));
+            gregs = &v->arch.vpmu.xenpmu_data->pmu.r.regs;
+            gregs->cs = (current->arch.flags & TF_kernel_mode) ? 0 : 0x3;
         }
-        else if ( !is_hardware_domain(current->domain) &&
-                  !is_idle_vcpu(current) )
+        else
         {
-            /* PV guest */
+            /* HVM guest */
+            struct segment_register cs;
+
             gregs = guest_cpu_user_regs();
             memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
                    gregs, sizeof(struct cpu_user_regs));
+
+            hvm_get_segment_register(current, x86_seg_cs, &cs);
+            gregs = &v->arch.vpmu.xenpmu_data->pmu.r.regs;
+            gregs->cs = cs.attr.fields.dpl;
         }
-        else
-            memcpy(&v->arch.vpmu.xenpmu_data->pmu.r.regs,
-                   regs, sizeof(struct cpu_user_regs));
 
         v->arch.vpmu.xenpmu_data->domain_id = current->domain->domain_id;
         v->arch.vpmu.xenpmu_data->vcpu_id = current->vcpu_id;
@@ -481,15 +518,20 @@ long do_xenpmu_op(int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
         if ( copy_from_guest(&pmu_params, arg, 1) )
             return -EFAULT;
 
-        if ( pmu_params.d.val & ~XENPMU_MODE_ON )
+        if ( (pmu_params.d.val & ~(XENPMU_MODE_ON | XENPMU_MODE_PRIV)) ||
+             ((pmu_params.d.val & XENPMU_MODE_ON) &&
+              (pmu_params.d.val & XENPMU_MODE_PRIV)) )
             return -EINVAL;
 
         vpmu_mode = pmu_params.d.val;
-        if ( vpmu_mode == XENPMU_MODE_OFF )
+
+        if ( (vpmu_mode == XENPMU_MODE_OFF) || (vpmu_mode & XENPMU_MODE_PRIV) )
            /*
            * After this VPMU context will never be loaded during context
-           * switch. We also prevent PMU MSR accesses (which can load
-           * context) when VPMU is disabled.
+           * switch. Because PMU MSR accesses load VPMU context we don't
+           * allow them when VPMU is off and, for non-provileged domains,
+           * when we are in privileged mode. (We do want these accesses to
+           * load VPMU context for control domain in this mode)
            */
             vpmu_unload_all();
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index adbdebe..90c5adb 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2526,7 +2526,11 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
         case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
             if ( !vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_WRITE) )
+            {
+                if ( (vpmu_mode & XENPMU_MODE_PRIV) &&
+                     is_hardware_domain(v->domain) )
                 goto invalid;
+            }
             break;
         default:
             if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
                 break;
@@ -2644,6 +2648,12 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
                 regs->edx = (uint32_t)(msr_content >> 32);
                 break;
             }
+            else if ( !is_hardware_domain(v->domain) )
+            {
+                /* Don't leak PMU MSRs to unprivileged domains */
+                regs->eax = regs->edx = 0;
+                break;
+            }
             goto rdmsr_normal;
 
         default:
diff --git a/xen/include/public/pmu.h b/xen/include/public/pmu.h
index 50f6d6d..e3352a2 100644
--- a/xen/include/public/pmu.h
+++ b/xen/include/public/pmu.h
@@ -56,9 +56,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t);
  * - XENPMU_MODE_OFF:   No PMU virtualization
  * - XENPMU_MODE_ON:    Guests can profile themselves, dom0 profiles
  *                      itself and Xen
+ * - XENPMU_MODE_PRIV:  Only dom0 has access to VPMU and it profiles
+ *                      everyone: itself, the hypervisor and the guests.
  */
 #define XENPMU_MODE_OFF           0
 #define XENPMU_MODE_ON            (1<<0)
+#define XENPMU_MODE_PRIV          (1<<1)
 
 /*
  * PMU features:
-- 
1.8.1.4