
Re: [PATCH v2 1/8] xen/x86: Add initial x2APIC ID to the per-vLAPIC save area



On Wed, May 08, 2024 at 01:39:20PM +0100, Alejandro Vallejo wrote:
> This allows the initial x2APIC ID to be sent on the migration stream. The
> hardcoded mapping x2apic_id=2*vcpu_id is maintained for the time being.
> Given the vlapic data is zero-extended on restore, fix up migrations from
> hosts without the field by setting it to the old convention if zero.
> 
> x2APIC IDs are calculated from the CPU policy where the guest topology is
> defined. For the time being, the function simply returns the old
> relationship, but will eventually return results consistent with the
> topology.
> 
> Signed-off-by: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
> ---
> v2:
>   * Removed usage of SET_xAPIC_ID().
>   * Restored previous logic when exposing leaf 0xb, and gate it for HVM only.
>   * Rewrote comment in lapic_load_fixup, including the implicit assumption.
>   * Moved vlapic_cpu_policy_changed() into hvm_cpuid_policy_changed())
>   * const-ified policy in vlapic_cpu_policy_changed()
> ---
>  xen/arch/x86/cpuid.c                   | 15 ++++---------
>  xen/arch/x86/hvm/vlapic.c              | 30 ++++++++++++++++++++++++--
>  xen/arch/x86/include/asm/hvm/hvm.h     |  1 +
>  xen/arch/x86/include/asm/hvm/vlapic.h  |  2 ++
>  xen/include/public/arch-x86/hvm/save.h |  2 ++
>  xen/include/xen/lib/x86/cpu-policy.h   |  9 ++++++++
>  xen/lib/x86/policy.c                   | 11 ++++++++++
>  7 files changed, 57 insertions(+), 13 deletions(-)
> 
> diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
> index 7a38e032146a..242c21ec5bb6 100644
> --- a/xen/arch/x86/cpuid.c
> +++ b/xen/arch/x86/cpuid.c
> @@ -139,10 +139,9 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
>          const struct cpu_user_regs *regs;
>  
>      case 0x1:
> -        /* TODO: Rework topology logic. */
>          res->b &= 0x00ffffffu;
>          if ( is_hvm_domain(d) )
> -            res->b |= (v->vcpu_id * 2) << 24;
> +            res->b |= vlapic_x2apic_id(vcpu_vlapic(v)) << 24;
>  
>          /* TODO: Rework vPMU control in terms of toolstack choices. */
>          if ( vpmu_available(v) &&
> @@ -311,19 +310,13 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
>          break;
>  
>      case 0xb:
> -        /*
> -         * In principle, this leaf is Intel-only.  In practice, it is tightly
> -         * coupled with x2apic, and we offer an x2apic-capable APIC emulation
> -         * to guests on AMD hardware as well.
> -         *
> -         * TODO: Rework topology logic.
> -         */
> -        if ( p->basic.x2apic )
> +        /* Don't expose topology information to PV guests */

Not sure whether we want to keep part of the comment about exposing
x2APIC to guests even when x2APIC is not present in the host.  I think
this code has changed and the comment is kind of stale now.

> +        if ( is_hvm_domain(d) && p->basic.x2apic )
>          {
>              *(uint8_t *)&res->c = subleaf;
>  
>              /* Fix the x2APIC identifier. */
> -            res->d = v->vcpu_id * 2;
> +            res->d = vlapic_x2apic_id(vcpu_vlapic(v));
>          }
>          break;
>  
> diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
> index 05072a21bf38..61a96474006b 100644
> --- a/xen/arch/x86/hvm/vlapic.c
> +++ b/xen/arch/x86/hvm/vlapic.c
> @@ -1069,7 +1069,7 @@ static uint32_t x2apic_ldr_from_id(uint32_t id)
>  static void set_x2apic_id(struct vlapic *vlapic)
>  {
>      const struct vcpu *v = vlapic_vcpu(vlapic);
> -    uint32_t apic_id = v->vcpu_id * 2;
> +    uint32_t apic_id = vlapic->hw.x2apic_id;
>      uint32_t apic_ldr = x2apic_ldr_from_id(apic_id);
>  
>      /*
> @@ -1083,6 +1083,22 @@ static void set_x2apic_id(struct vlapic *vlapic)
>      vlapic_set_reg(vlapic, APIC_LDR, apic_ldr);
>  }
>  
> +void vlapic_cpu_policy_changed(struct vcpu *v)
> +{
> +    struct vlapic *vlapic = vcpu_vlapic(v);
> +    const struct cpu_policy *cp = v->domain->arch.cpu_policy;
> +
> +    /*
> +     * Don't override the initial x2APIC ID if we have migrated it or
> +     * if the domain doesn't have vLAPIC at all.
> +     */
> +    if ( !has_vlapic(v->domain) || vlapic->loaded.hw )
> +        return;
> +
> +    vlapic->hw.x2apic_id = x86_x2apic_id_from_vcpu_id(cp, v->vcpu_id);
> +    vlapic_set_reg(vlapic, APIC_ID, SET_xAPIC_ID(vlapic->hw.x2apic_id));

Nit: in case we decide to start APICs in x2APIC mode, might be good to
take this into account here and use vlapic_x2apic_mode(vlapic) to
select whether SET_xAPIC_ID() needs to be used or not:

    vlapic_set_reg(vlapic, APIC_ID,
        vlapic_x2apic_mode(vlapic) ? vlapic->hw.x2apic_id
                                   : SET_xAPIC_ID(vlapic->hw.x2apic_id));

Or similar.

> +}
> +
>  int guest_wrmsr_apic_base(struct vcpu *v, uint64_t val)
>  {
>      const struct cpu_policy *cp = v->domain->arch.cpu_policy;
> @@ -1449,7 +1465,7 @@ void vlapic_reset(struct vlapic *vlapic)
>      if ( v->vcpu_id == 0 )
>          vlapic->hw.apic_base_msr |= APIC_BASE_BSP;
>  
> -    vlapic_set_reg(vlapic, APIC_ID, (v->vcpu_id * 2) << 24);
> +    vlapic_set_reg(vlapic, APIC_ID, SET_xAPIC_ID(vlapic->hw.x2apic_id));
>      vlapic_do_init(vlapic);
>  }
>  
> @@ -1514,6 +1530,16 @@ static void lapic_load_fixup(struct vlapic *vlapic)
>      const struct vcpu *v = vlapic_vcpu(vlapic);
>      uint32_t good_ldr = x2apic_ldr_from_id(vlapic->loaded.id);
>  
> +    /*
> +     * Loading record without hw.x2apic_id in the save stream, calculate using
> +     * the traditional "vcpu_id * 2" relation. There's an implicit assumption
> +     * that vCPU0 always has x2APIC0, which is true for the old relation, and
> +     * still holds under the new x2APIC generation algorithm. While that case
> +     * goes through the conditional it's benign because it still maps to zero.
> +     */
> +    if ( !vlapic->hw.x2apic_id )
> +        vlapic->hw.x2apic_id = v->vcpu_id * 2;
> +
>      /* Skip fixups on xAPIC mode, or if the x2APIC LDR is already correct */
>      if ( !vlapic_x2apic_mode(vlapic) ||
>           (vlapic->loaded.ldr == good_ldr) )
> diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
> index 0c9e6f15645d..e1f0585d75a9 100644
> --- a/xen/arch/x86/include/asm/hvm/hvm.h
> +++ b/xen/arch/x86/include/asm/hvm/hvm.h
> @@ -448,6 +448,7 @@ static inline void hvm_update_guest_efer(struct vcpu *v)
>  static inline void hvm_cpuid_policy_changed(struct vcpu *v)
>  {
>      alternative_vcall(hvm_funcs.cpuid_policy_changed, v);
> +    vlapic_cpu_policy_changed(v);

Not sure whether this call would be better placed in
cpu_policy_updated(), inside the is_hvm_vcpu() conditional branch.

hvm_cpuid_policy_changed() &c are just wrappers around the hvm_funcs
hooks; pulling vlapic functions in there is likely to complicate the
header dependencies in the long term.
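
IOW something like the following inside cpu_policy_updated() (untested,
just to illustrate the suggested placement):

    if ( is_hvm_vcpu(v) )
    {
        hvm_cpuid_policy_changed(v);
        vlapic_cpu_policy_changed(v);
    }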

>  }
>  
>  static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
> diff --git a/xen/arch/x86/include/asm/hvm/vlapic.h b/xen/arch/x86/include/asm/hvm/vlapic.h
> index 88ef94524339..e8d41313abd3 100644
> --- a/xen/arch/x86/include/asm/hvm/vlapic.h
> +++ b/xen/arch/x86/include/asm/hvm/vlapic.h
> @@ -44,6 +44,7 @@
>  #define vlapic_xapic_mode(vlapic)                               \
>      (!vlapic_hw_disabled(vlapic) && \
>       !((vlapic)->hw.apic_base_msr & APIC_BASE_EXTD))
> +#define vlapic_x2apic_id(vlapic) ((vlapic)->hw.x2apic_id)
>  
>  /*
>   * Generic APIC bitmap vector update & search routines.
> @@ -107,6 +108,7 @@ int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool force_ack);
>  
>  int  vlapic_init(struct vcpu *v);
>  void vlapic_destroy(struct vcpu *v);
> +void vlapic_cpu_policy_changed(struct vcpu *v);
>  
>  void vlapic_reset(struct vlapic *vlapic);
>  
> diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
> index 7ecacadde165..1c2ec669ffc9 100644
> --- a/xen/include/public/arch-x86/hvm/save.h
> +++ b/xen/include/public/arch-x86/hvm/save.h
> @@ -394,6 +394,8 @@ struct hvm_hw_lapic {
>      uint32_t             disabled; /* VLAPIC_xx_DISABLED */
>      uint32_t             timer_divisor;
>      uint64_t             tdt_msr;
> +    uint32_t             x2apic_id;
> +    uint32_t             rsvd_zero;

I think Jan requested that this field be checked to be 0 for incoming
migrations, yet I don't see such a check added.
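
Something along the lines of the below in lapic_load_hidden(), right
after the zero-extended load (untested, assuming the local vlapic
pointer is still named 's'):

    if ( s->hw.rsvd_zero )
        return -EINVAL;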

>  };
>  
>  DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);
> diff --git a/xen/include/xen/lib/x86/cpu-policy.h b/xen/include/xen/lib/x86/cpu-policy.h
> index d5e447e9dc06..392320b9adbe 100644
> --- a/xen/include/xen/lib/x86/cpu-policy.h
> +++ b/xen/include/xen/lib/x86/cpu-policy.h
> @@ -542,6 +542,15 @@ int x86_cpu_policies_are_compatible(const struct cpu_policy *host,
>                                      const struct cpu_policy *guest,
>                                      struct cpu_policy_errors *err);
>  
> +/**
> + * Calculates the x2APIC ID of a vCPU given a CPU policy
> + *
> + * @param p          CPU policy of the domain.
> + * @param id         vCPU ID of the vCPU.
> + * @returns x2APIC ID of the vCPU.
> + */
> +uint32_t x86_x2apic_id_from_vcpu_id(const struct cpu_policy *p, uint32_t id);
> +
>  #endif /* !XEN_LIB_X86_POLICIES_H */
>  
>  /*
> diff --git a/xen/lib/x86/policy.c b/xen/lib/x86/policy.c
> index f033d22785be..4cef658feeb8 100644
> --- a/xen/lib/x86/policy.c
> +++ b/xen/lib/x86/policy.c
> @@ -2,6 +2,17 @@
>  
>  #include <xen/lib/x86/cpu-policy.h>
>  
> +uint32_t x86_x2apic_id_from_vcpu_id(const struct cpu_policy *p, uint32_t id)
> +{
> +    /*
> +     * TODO: Derive x2APIC ID from the topology information inside `p`
> +     *       rather than from vCPU ID. This bodge is a temporary measure
> +     *       until all infra is in place to retrieve or derive the initial
> +     *       x2APIC ID from migrated domains.
> +     */
> +    return vcpu_id * 2;

I don't think this builds?

Note the parameter is plain 'id' not 'vcpu_id'.
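
I.e., presumably this was meant to be:

    return id * 2;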

Thanks, Roger.
