
Re: [Xen-devel] [PATCH 13/28] ARM: vGICv3: handle virtual LPI pending and property tables



On Mon, 30 Jan 2017, Andre Przywara wrote:
> Allow a guest to provide the address and size for the memory regions
> it has reserved for the GICv3 pending and property tables.
> We sanitise the various fields of the respective redistributor
> registers and map those pages into Xen's address space to have easy
> access.
> 
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>

Please take a look at
alpine.DEB.2.10.1610281619240.9978@sstabellini-ThinkPad-X260
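
For reference, a rough guest's-eye sketch of how these registers end up
being programmed before LPIs are enabled. The field layout follows the
GICv3 spec; the macro names below are invented for illustration, and the
shareability/cacheability attributes are exactly the ones your
sanitising code further down may rewrite:

    #include <stdint.h>

    #define PROPBASER_IDBITS(n)   ((uint64_t)((n) - 1) & 0x1f)  /* bits[4:0]   */
    #define PROPBASER_INNER_RaWb  (3ULL << 7)                   /* bits[9:7]   */
    #define PROPBASER_INNER_SH    (1ULL << 10)                  /* bits[11:10] */
    #define PROPBASER_PA(pa)      ((uint64_t)(pa) & 0x000ffffffffff000ULL)

    static void guest_setup_propbaser(volatile uint64_t *gicr_propbaser,
                                      uint64_t prop_pa, unsigned int id_bits)
    {
        /* The hypervisor sanitises these attributes to what it supports. */
        *gicr_propbaser = PROPBASER_PA(prop_pa) | PROPBASER_INNER_SH |
                          PROPBASER_INNER_RaWb | PROPBASER_IDBITS(id_bits);
    }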


>  xen/arch/arm/vgic-v3.c       | 220 +++++++++++++++++++++++++++++++++++++++----
>  xen/arch/arm/vgic.c          |   4 +
>  xen/include/asm-arm/domain.h |   8 +-
>  xen/include/asm-arm/vgic.h   |  24 ++++-
>  4 files changed, 233 insertions(+), 23 deletions(-)
> 
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index b0653c2..c6db2d7 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -20,12 +20,14 @@
>  
>  #include <xen/bitops.h>
>  #include <xen/config.h>
> +#include <xen/domain_page.h>
>  #include <xen/lib.h>
>  #include <xen/init.h>
>  #include <xen/softirq.h>
>  #include <xen/irq.h>
>  #include <xen/sched.h>
>  #include <xen/sizes.h>
> +#include <xen/vmap.h>
>  #include <asm/current.h>
>  #include <asm/mmio.h>
>  #include <asm/gic_v3_defs.h>
> @@ -229,12 +231,15 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>          goto read_reserved;
>  
>      case VREG64(GICR_PROPBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
> +        return 1;
>  
>      case VREG64(GICR_PENDBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
> +        *r &= ~GICR_PENDBASER_PTZ;       /* WO, reads as 0 */
> +        return 1;
>  
>      case 0x0080:
>          goto read_reserved;
> @@ -302,11 +307,6 @@ bad_width:
>      domain_crash_synchronous();
>      return 0;
>  
> -read_as_zero_64:
> -    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> -    *r = 0;
> -    return 1;
> -
>  read_as_zero_32:
>      if ( dabt.size != DABT_WORD ) goto bad_width;
>      *r = 0;
> @@ -331,11 +331,179 @@ read_unknown:
>      return 1;
>  }
>  
> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
> +                                    int field_shift,
> +                                    uint64_t (*sanitise_fn)(uint64_t))
> +{
> +    uint64_t field = (reg & field_mask) >> field_shift;
> +
> +    field = sanitise_fn(field) << field_shift;
> +
> +    return (reg & ~field_mask) | field;
> +}
> +
> +/* We want to avoid outer shareable. */
> +static uint64_t vgic_sanitise_shareability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_OuterShareable:
> +        return GIC_BASER_InnerShareable;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Avoid any inner non-cacheable mapping. */
> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_nCnB:
> +    case GIC_BASER_CACHE_nC:
> +        return GIC_BASER_CACHE_RaWb;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Non-cacheable or same-as-inner are OK. */
> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_SameAsInner:
> +    case GIC_BASER_CACHE_nC:
> +        return field;
> +    default:
> +        return GIC_BASER_CACHE_nC;
> +    }
> +}
> +
> +static uint64_t sanitize_propbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
> +                              GICR_PROPBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~GICR_PROPBASER_RES0_MASK;
> +    return reg;
> +}
> +
> +static uint64_t sanitize_pendbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
> +                              GICR_PENDBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~GICR_PENDBASER_RES0_MASK;
> +    return reg;
> +}
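
The effect of the three sanitisers composed is easy to check in
isolation. A standalone sketch (not Xen code; encodings per the GICv3
spec, shifts as in the patch) demoting an outer-shareable, inner
non-cacheable guest value:

    #include <stdint.h>
    #include <stdio.h>

    #define SH_SHIFT 10
    #define SH_MASK  (3ULL << SH_SHIFT)
    #define IC_SHIFT 7
    #define IC_MASK  (7ULL << IC_SHIFT)

    enum { NonShareable, InnerShareable, OuterShareable };
    enum { CACHE_nCnB, CACHE_nC, CACHE_RaWt, CACHE_RaWb };

    /* Same shape as vgic_sanitise_field() in the patch. */
    static uint64_t sanitise_field(uint64_t reg, uint64_t mask, int shift,
                                   uint64_t (*fn)(uint64_t))
    {
        return (reg & ~mask) | (fn((reg & mask) >> shift) << shift);
    }

    static uint64_t fix_share(uint64_t f)
    {
        return f == OuterShareable ? InnerShareable : f;
    }

    static uint64_t fix_inner(uint64_t f)
    {
        return (f == CACHE_nCnB || f == CACHE_nC) ? CACHE_RaWb : f;
    }

    int main(void)
    {
        /* Guest asks for OuterShareable, inner non-cacheable. */
        uint64_t reg = ((uint64_t)OuterShareable << SH_SHIFT) |
                       ((uint64_t)CACHE_nC << IC_SHIFT);

        reg = sanitise_field(reg, SH_MASK, SH_SHIFT, fix_share);
        reg = sanitise_field(reg, IC_MASK, IC_SHIFT, fix_inner);

        /* Prints 0x580: InnerShareable (1 << 10) | RaWb (3 << 7). */
        printf("0x%llx\n", (unsigned long long)reg);
        return 0;
    }
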
> +
> +/*
> + * Mark a given number of guest pages as used (by increasing their refcount),
> + * starting with the given guest address. This needs to be called once before
> + * calling (possibly repeatedly) map_guest_pages().
> + * Before the domain gets destroyed, call put_guest_pages() to drop the
> + * reference.
> + */
> +int get_guest_pages(struct domain *d, paddr_t gpa, int nr_pages)
> +{
> +    int i;
> +    struct page_info *page;
> +
> +    for ( i = 0; i < nr_pages; i++ )
> +    {
> +        page = get_page_from_gfn(d, (gpa >> PAGE_SHIFT) + i, NULL, P2M_ALLOC);
> +        if ( !page )
> +            return -EINVAL;
> +    }
> +
> +    return 0;
> +}
> +
> +void put_guest_pages(struct domain *d, paddr_t gpa, int nr_pages)
> +{
> +    mfn_t mfn;
> +    int i;
> +
> +    p2m_read_lock(&d->arch.p2m);
> +    for ( i = 0; i < nr_pages; i++ )
> +    {
> +        mfn = p2m_get_entry(&d->arch.p2m, _gfn((gpa >> PAGE_SHIFT) + i),
> +                            NULL, NULL, NULL);
> +        if ( mfn_eq(mfn, INVALID_MFN) )
> +            continue;
> +        put_page(mfn_to_page(mfn_x(mfn)));
> +    }
> +    p2m_read_unlock(&d->arch.p2m);
> +}
> +
> +/*
> + * Provides easy access to guest memory by "mapping" some parts of it into
> + * Xen's VA space. In fact it relies on the memory being already mapped
> + * and just provides a pointer to it.
> + * This allows the ITS configuration data to be held in guest memory and
> + * avoids using Xen's memory for that.
> + */
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages)
> +{
> +    int i;
> +    void *ptr, *follow;
> +
> +    ptr = map_domain_page(_mfn(guest_addr >> PAGE_SHIFT));
> +
> +    /* Make sure subsequent pages are mapped in a virtually contiguous way. */
> +    for ( i = 1; i < nr_pages; i++ )
> +    {
> +        follow = map_domain_page(_mfn((guest_addr >> PAGE_SHIFT) + i));
> +        if ( follow != ptr + ((long)i << PAGE_SHIFT) )
> +            return NULL;
> +    }
> +
> +    return ptr + (guest_addr & ~PAGE_MASK);
> +}
> +
> +/* "Unmap" previously mapped guest pages. Should be optimized away on arm64. */
> +void unmap_guest_pages(void *va, int nr_pages)
> +{
> +    long i;
> +
> +    for ( i = nr_pages - 1; i >= 0; i-- )
> +        unmap_domain_page(((uintptr_t)va & PAGE_MASK) + (i << PAGE_SHIFT));
> +}
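
To make the intended lifecycle of these four helpers explicit, here is
the calling pattern as I read it (a sketch only; error handling is
abbreviated and the function name is mine):

    static int example_setup_table(struct domain *d, paddr_t gpa, int nr_pages)
    {
        uint8_t *table;
        int ret;

        ret = get_guest_pages(d, gpa, nr_pages);    /* pin: refcounts, once */
        if ( ret )
            return ret;

        table = map_guest_pages(d, gpa, nr_pages);  /* cheap, may be repeated */
        if ( !table )
        {
            put_guest_pages(d, gpa, nr_pages);
            return -EINVAL;
        }

        /* ... access guest memory through 'table' ... */

        unmap_guest_pages(table, nr_pages);
        /* put_guest_pages() runs only once, at domain destruction. */
        return 0;
    }
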
> +
> +int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi)
> +{
> +    if ( vlpi >= d->arch.vgic.nr_lpis )
> +        return GIC_PRI_IRQ;
> +
> +    return d->arch.vgic.proptable[vlpi - LPI_OFFSET] & LPI_PROP_PRIO_MASK;
> +}
> +
> +bool vgic_lpi_is_enabled(struct domain *d, uint32_t vlpi)
> +{
> +    if ( vlpi >= d->arch.vgic.nr_lpis )
> +        return false;
> +
> +    return d->arch.vgic.proptable[vlpi - LPI_OFFSET] & LPI_PROP_ENABLED;
> +}
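
Both accessors index one property byte per LPI, with the first LPI
(INTID 8192, i.e. LPI_OFFSET) mapping to proptable[0]. The byte layout
they rely on is the one from the GICv3 spec; a mirror of the masks I
expect the LPI_PROP_* definitions to carry:

    /* One byte per LPI in the property table:
     *   bit[0]     enable    (LPI_PROP_ENABLED)
     *   bit[1]     reserved
     *   bits[7:2]  priority  (LPI_PROP_PRIO_MASK, i.e. 0xfc)
     */
    #define EX_LPI_PROP_ENABLED    0x01
    #define EX_LPI_PROP_PRIO_MASK  0xfc
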
> +
>  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>                                            uint32_t gicr_reg,
>                                            register_t r)
>  {
>      struct hsr_dabt dabt = info->dabt;
> +    uint64_t reg;
>  
>      switch ( gicr_reg )
>      {
> @@ -366,36 +534,54 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>          goto write_impl_defined;
>  
>      case VREG64(GICR_SETLPIR):
> -        /* LPI is not implemented */
> +        /* LPIs without an ITS are not implemented */
>          goto write_ignore_64;
>  
>      case VREG64(GICR_CLRLPIR):
> -        /* LPI is not implemented */
> +        /* LPIs without an ITS are not implemented */
>          goto write_ignore_64;
>  
>      case 0x0050:
>          goto write_reserved;
>  
>      case VREG64(GICR_PROPBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +
> +        /* Writing PROPBASER with LPIs enabled is UNPREDICTABLE. */
> +        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
> +            return 1;
> +
> +        reg = v->domain->arch.vgic.rdist_propbase;
> +        vgic_reg64_update(&reg, r, info);
> +        reg = sanitize_propbaser(reg);
> +        v->domain->arch.vgic.rdist_propbase = reg;
> +        return 1;
>  
>      case VREG64(GICR_PENDBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +
> +        /* Writing PENDBASER with LPIs enabled is UNPREDICTABLE. */
> +        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
> +            return 1;
> +
> +        reg = v->arch.vgic.rdist_pendbase;
> +        vgic_reg64_update(&reg, r, info);
> +        reg = sanitize_pendbaser(reg);
> +        v->arch.vgic.rdist_pendbase = reg;
> +        return 1;
>  
>      case 0x0080:
>          goto write_reserved;
>  
>      case VREG64(GICR_INVLPIR):
> -        /* LPI is not implemented */
> +        /* LPIs without an ITS are not implemented */
>          goto write_ignore_64;
>  
>      case 0x00A8:
>          goto write_reserved;
>  
>      case VREG64(GICR_INVALLR):
> -        /* LPI is not implemented */
> +        /* LPIs without an ITS are not implemented */
>          goto write_ignore_64;
>  
>      case 0x00B8:
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 7e3440f..cf444f3 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -494,6 +494,10 @@ struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
>          empty->pirq.irq = lpi;
>      }
>  
> +    /* Update the enabled status */
> +    if ( vgic_lpi_is_enabled(v->domain, lpi) )
> +        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);
> +
>      spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
>  
>      return &empty->pirq;
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index f44a84b..33c1851 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -110,6 +110,9 @@ struct arch_domain
>          } *rdist_regions;
>          int nr_regions;                     /* Number of rdist regions */
>          uint32_t rdist_stride;              /* Re-Distributor stride */
> +        int nr_lpis;
> +        uint64_t rdist_propbase;
> +        uint8_t *proptable;
>          struct rb_root its_devices;         /* devices mapped to an ITS */
>          spinlock_t its_devices_lock;        /* protects the its_devices tree */
>  #endif
> @@ -255,7 +258,10 @@ struct arch_vcpu
>  
>          /* GICv3: redistributor base and flags for this vCPU */
>          paddr_t rdist_base;
> -#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)
> +        uint64_t rdist_pendbase;
> +        unsigned long *pendtable;
>          uint8_t flags;
>          struct list_head pending_lpi_list;
>          spinlock_t pending_lpi_list_lock;   /* protects the pending_lpi_list */
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index 03d4d2e..a882fe8 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -285,6 +285,11 @@ VGIC_REG_HELPERS(32, 0x3);
>  
>  #undef VGIC_REG_HELPERS
>  
> +int get_guest_pages(struct domain *d, paddr_t gpa, int nr_pages);
> +void put_guest_pages(struct domain *d, paddr_t gpa, int nr_pages);
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages);
> +void unmap_guest_pages(void *va, int nr_pages);
> +
>  enum gic_sgi_mode;
>  
>  /*
> @@ -312,14 +317,23 @@ extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq);
>  extern bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr);
>  extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
>  extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
> -/* placeholder function until the property table gets introduced */
> -static inline int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi)
> -{
> -    return 0xa;
> -}
>  extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);
>  int vgic_v2_init(struct domain *d, int *mmio_count);
>  int vgic_v3_init(struct domain *d, int *mmio_count);
> +#ifdef CONFIG_HAS_GICV3
> +extern int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi);
> +extern bool vgic_lpi_is_enabled(struct domain *d, uint32_t vlpi);
> +#else
> +static inline int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi)
> +{
> +    return 0xa0;
> +}
> +
> +static inline bool vgic_lpi_is_enabled(struct domain *d, uint32_t vlpi)
> +{
> +    return false;
> +}
> +#endif
>  
>  extern int domain_vgic_register(struct domain *d, int *mmio_count);
>  extern int vcpu_vgic_free(struct vcpu *v);
> -- 
> 2.9.0
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel


