
Re: [Xen-devel] [PATCH v6 12/36] ARM: GICv3: forward pending LPIs to guests



On Fri, 7 Apr 2017, Andre Przywara wrote:
> Upon receiving an LPI on the host, we need to find the right VCPU and
> virtual IRQ number to get this IRQ injected.
> Iterate our two-level LPI table to find this information quickly when
> the host takes an LPI. Call the existing injection function to let the
> GIC emulation deal with this interrupt.
> Also we enhance struct pending_irq to cache the pending bit and the
> priority information for LPIs. Reading the information from there is
> faster than accessing the property table from guest memory. Also it
> uses some padding area, so it does not require more memory.
> This introduces a do_LPI() hardware gic_ops and a vgic_ops function to
> retrieve the (cached) priority value of an LPI.
> 
> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
> ---
>  xen/arch/arm/gic-v2.c            |  7 +++++
>  xen/arch/arm/gic-v3-lpi.c        | 56 ++++++++++++++++++++++++++++++++++++++++
>  xen/arch/arm/gic-v3.c            |  1 +
>  xen/arch/arm/gic.c               |  8 +++++-
>  xen/arch/arm/vgic-v2.c           |  7 +++++
>  xen/arch/arm/vgic-v3.c           | 12 +++++++++
>  xen/arch/arm/vgic.c              |  7 ++++-
>  xen/include/asm-arm/domain.h     |  3 ++-
>  xen/include/asm-arm/gic.h        |  2 ++
>  xen/include/asm-arm/gic_v3_its.h |  8 ++++++
>  xen/include/asm-arm/vgic.h       |  8 ++++++
>  11 files changed, 116 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
> index 270a136..ffbe47c 100644
> --- a/xen/arch/arm/gic-v2.c
> +++ b/xen/arch/arm/gic-v2.c
> @@ -1217,6 +1217,12 @@ static int __init gicv2_init(void)
>      return 0;
>  }
>  
> +static void gicv2_do_LPI(unsigned int lpi)
> +{
> +    /* No LPIs in a GICv2 */
> +    BUG();
> +}
> +
>  const static struct gic_hw_operations gicv2_ops = {
>      .info                = &gicv2_info,
>      .init                = gicv2_init,
> @@ -1244,6 +1250,7 @@ const static struct gic_hw_operations gicv2_ops = {
>      .make_hwdom_madt     = gicv2_make_hwdom_madt,
>      .map_hwdom_extra_mappings = gicv2_map_hwdown_extra_mappings,
>      .iomem_deny_access   = gicv2_iomem_deny_access,
> +    .do_LPI              = gicv2_do_LPI,
>  };
>  
>  /* Set up the GIC */
> diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c
> index 292f2d0..5f3fe2c 100644
> --- a/xen/arch/arm/gic-v3-lpi.c
> +++ b/xen/arch/arm/gic-v3-lpi.c
> @@ -136,6 +136,62 @@ uint64_t gicv3_get_redist_address(unsigned int cpu, bool use_pta)
>          return per_cpu(lpi_redist, cpu).redist_id << 16;
>  }
>  
> +/*
> + * Handle incoming LPIs, which are a bit special, because they are potentially
> + * numerous and also only get injected into guests. Treat them specially here,
> + * by just looking up their target vCPU and virtual LPI number and hand it
> + * over to the injection function.
> + * Please note that LPIs are edge-triggered only, also have no active state,
> + * so spurious interrupts on the host side are no issue (we can just ignore
> + * them).
> + * Also a guest cannot expect that firing interrupts that haven't been
> + * fully configured yet will reach the CPU, so we don't need to care about
> + * this special case.
> + */
> +void gicv3_do_LPI(unsigned int lpi)
> +{
> +    struct domain *d;
> +    union host_lpi *hlpip, hlpi;
> +    struct vcpu *vcpu;
> +
> +    /* EOI the LPI already. */
> +    WRITE_SYSREG32(lpi, ICC_EOIR1_EL1);
> +
> +    /* Find out if a guest mapped something to this physical LPI. */
> +    hlpip = gic_get_host_lpi(lpi);
> +    if ( !hlpip )
> +        return;
> +
> +    hlpi.data = read_u64_atomic(&hlpip->data);
> +
> +    /*
> +     * Unmapped events are marked with an invalid LPI ID. We can safely
> +     * ignore them, as they have no further state and no-one can expect
> +     * to see them if they have not been mapped.
> +     */
> +    if ( hlpi.virt_lpi == INVALID_LPI )
> +        return;
> +
> +    d = rcu_lock_domain_by_id(hlpi.dom_id);
> +    if ( !d )
> +        return;
> +
> +    /* Make sure we don't step beyond the vcpu array. */
> +    if ( hlpi.vcpu_id >= d->max_vcpus )
> +    {
> +        rcu_unlock_domain(d);
> +        return;
> +    }
> +
> +    vcpu = d->vcpu[hlpi.vcpu_id];
> +
> +    /* Check if the VCPU is ready to receive LPIs. */
> +    if ( vcpu->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
> +        vgic_vcpu_inject_irq(vcpu, hlpi.virt_lpi);
> +
> +    rcu_unlock_domain(d);
> +}
> +
>  static int gicv3_lpi_allocate_pendtable(uint64_t *reg)
>  {
>      uint64_t val;
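
(A note for readers who do not have the earlier patches of this series at
hand: gic_get_host_lpi() and union host_lpi are introduced in a previous
patch. The sketch below only illustrates the two-level lookup that the
commit message refers to; the field names, the LPI_OFFSET value and the
chunking shown here are assumptions for illustration, not the
authoritative definitions.)

/* One 64-bit entry per physical LPI, so it can be read atomically. */
union host_lpi {
    uint64_t data;
    struct {
        uint32_t virt_lpi;      /* virtual LPI number, INVALID_LPI if unmapped */
        uint16_t dom_id;        /* domain this LPI is routed to */
        uint16_t vcpu_id;       /* target vCPU within that domain */
    };
};

/* Second level: one page worth of host_lpi entries per chunk. */
#define HOST_LPIS_PER_PAGE      (PAGE_SIZE / sizeof(union host_lpi))

static union host_lpi *gic_get_host_lpi(uint32_t plpi)
{
    union host_lpi *block;

    if ( !is_lpi(plpi) )
        return NULL;

    plpi -= LPI_OFFSET;                 /* physical LPI IDs start at 8192 */

    /* First level: array of pointers to lazily allocated chunks. */
    block = lpi_data.host_lpis[plpi / HOST_LPIS_PER_PAGE];
    if ( !block )
        return NULL;                    /* chunk not (yet) allocated */

    return &block[plpi % HOST_LPIS_PER_PAGE];
}

This is why do_LPI() above only needs a single atomic 64-bit read
(read_u64_atomic()) to learn the target domain, vCPU and virtual LPI
number for an incoming host LPI.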
> diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
> index a559e5e..63dbc21 100644
> --- a/xen/arch/arm/gic-v3.c
> +++ b/xen/arch/arm/gic-v3.c
> @@ -1670,6 +1670,7 @@ static const struct gic_hw_operations gicv3_ops = {
>      .make_hwdom_dt_node  = gicv3_make_hwdom_dt_node,
>      .make_hwdom_madt     = gicv3_make_hwdom_madt,
>      .iomem_deny_access   = gicv3_iomem_deny_access,
> +    .do_LPI              = gicv3_do_LPI,
>  };
>  
>  static int __init gicv3_dt_preinit(struct dt_device_node *node, const void *data)
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index 44c34b1..a6fb927 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -728,7 +728,13 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
>              do_IRQ(regs, irq, is_fiq);
>              local_irq_disable();
>          }
> -        else if (unlikely(irq < 16))
> +        else if ( is_lpi(irq) )
> +        {
> +            local_irq_enable();
> +            gic_hw_ops->do_LPI(irq);
> +            local_irq_disable();
> +        }
> +        else if ( unlikely(irq < 16) )
>          {
>              do_sgi(regs, irq);
>          }
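
(For reference, the is_lpi() check added above is essentially a range
check on the interrupt ID. A minimal sketch, assuming the usual GICv3 ID
space where LPIs start at interrupt ID 8192:)

/* Sketch only: the real helper and LPI_OFFSET live in the Xen headers. */
#define LPI_OFFSET      8192

static inline bool is_lpi(unsigned int irq)
{
    return irq >= LPI_OFFSET;
}

As with the do_IRQ() path, interrupts are re-enabled around the do_LPI()
call, so handling an LPI does not keep interrupts masked on this CPU.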
> diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
> index 0587569..df91940 100644
> --- a/xen/arch/arm/vgic-v2.c
> +++ b/xen/arch/arm/vgic-v2.c
> @@ -709,11 +709,18 @@ static struct pending_irq *vgic_v2_lpi_to_pending(struct domain *d,
>      BUG();
>  }
>  
> +static int vgic_v2_lpi_get_priority(struct domain *d, unsigned int vlpi)
> +{
> +    /* Dummy function, no LPIs on a VGICv2. */
> +    BUG();
> +}
> +
>  static const struct vgic_ops vgic_v2_ops = {
>      .vcpu_init   = vgic_v2_vcpu_init,
>      .domain_init = vgic_v2_domain_init,
>      .domain_free = vgic_v2_domain_free,
>      .lpi_to_pending = vgic_v2_lpi_to_pending,
> +    .lpi_get_priority = vgic_v2_lpi_get_priority,
>      .max_vcpus = 8,
>  };
>  
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index f25125e..6bc3d76 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -1553,12 +1553,24 @@ static struct pending_irq *vgic_v3_lpi_to_pending(struct domain *d,
>      return pirq;
>  }
>  
> +/* Retrieve the priority of an LPI from its struct pending_irq. */
> +static int vgic_v3_lpi_get_priority(struct domain *d, uint32_t vlpi)
> +{
> +    struct pending_irq *p = vgic_v3_lpi_to_pending(d, vlpi);
> +
> +    if ( !p )
> +        return GIC_PRI_IRQ;
> +
> +    return p->lpi_priority;
> +}
> +
>  static const struct vgic_ops v3_ops = {
>      .vcpu_init   = vgic_v3_vcpu_init,
>      .domain_init = vgic_v3_domain_init,
>      .domain_free = vgic_v3_domain_free,
>      .emulate_reg  = vgic_v3_emulate_reg,
>      .lpi_to_pending = vgic_v3_lpi_to_pending,
> +    .lpi_get_priority = vgic_v3_lpi_get_priority,
>      /*
>       * We use both AFF1 and AFF0 in (v)MPIDR. Thus, the max number of CPU
>       * that can be supported is up to 4096(==256*16) in theory.
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index a7a50bc..e6c97b2 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -226,10 +226,15 @@ struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
>  
>  static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
>  {
> -    struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
> +    struct vgic_irq_rank *rank;
>      unsigned long flags;
>      int priority;
>  
> +    /* LPIs don't have a rank, also store their priority separately. */
> +    if ( is_lpi(virq) )
> +        return v->domain->arch.vgic.handler->lpi_get_priority(v->domain, virq);
> +
> +    rank = vgic_rank_irq(v, virq);
>      vgic_lock_rank(v, rank, flags);
>      priority = rank->priority[virq & INTERRUPT_RANK_MASK];
>      vgic_unlock_rank(v, rank, flags);
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 91dfe0a..583d491 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -259,7 +259,8 @@ struct arch_vcpu
>  
>          /* GICv3: redistributor base and flags for this vCPU */
>          paddr_t rdist_base;
> -#define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_RDIST_LAST      (1 << 0)        /* last vCPU of the rdist */
> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)
>          uint8_t flags;
>      } vgic;
>  
> diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
> index 836a103..42963c0 100644
> --- a/xen/include/asm-arm/gic.h
> +++ b/xen/include/asm-arm/gic.h
> @@ -366,6 +366,8 @@ struct gic_hw_operations {
>      int (*map_hwdom_extra_mappings)(struct domain *d);
>      /* Deny access to GIC regions */
>      int (*iomem_deny_access)(const struct domain *d);
> +    /* Handle LPIs, which require special handling */
> +    void (*do_LPI)(unsigned int lpi);
>  };
>  
>  void register_gic_ops(const struct gic_hw_operations *ops);
> diff --git a/xen/include/asm-arm/gic_v3_its.h b/xen/include/asm-arm/gic_v3_its.h
> index 29559a3..7470779 100644
> --- a/xen/include/asm-arm/gic_v3_its.h
> +++ b/xen/include/asm-arm/gic_v3_its.h
> @@ -134,6 +134,8 @@ void gicv3_its_dt_init(const struct dt_device_node *node);
>  
>  bool gicv3_its_host_has_its(void);
>  
> +void gicv3_do_LPI(unsigned int lpi);
> +
>  int gicv3_lpi_init_rdist(void __iomem * rdist_base);
>  
>  /* Initialize the host structures for LPIs and the host ITSes. */
> @@ -175,6 +177,12 @@ static inline bool gicv3_its_host_has_its(void)
>      return false;
>  }
>  
> +static inline void gicv3_do_LPI(unsigned int lpi)
> +{
> +    /* We don't enable LPIs without an ITS. */
> +    BUG();
> +}
> +
>  static inline int gicv3_lpi_init_rdist(void __iomem * rdist_base)
>  {
>      return -ENODEV;
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index 04972d3..52856ca 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -60,18 +60,25 @@ struct pending_irq
>       * vcpu while it is still inflight and on an GICH_LR register on the
>       * old vcpu.
>       *
> +     * GIC_IRQ_GUEST_LPI_PENDING: this caches the pending bit of an LPI.
> +     * On hardware the LPI pending bit is stored in a table in system
> +     * memory, which would require us to access guest memory every time
> +     * we want to learn the state. So we cache the state here.
> +     *
>       */
>  #define GIC_IRQ_GUEST_QUEUED   0
>  #define GIC_IRQ_GUEST_ACTIVE   1
>  #define GIC_IRQ_GUEST_VISIBLE  2
>  #define GIC_IRQ_GUEST_ENABLED  3
>  #define GIC_IRQ_GUEST_MIGRATING   4
> +#define GIC_IRQ_GUEST_LPI_PENDING 5

Same comment on GIC_IRQ_GUEST_LPI_PENDING


>      unsigned long status;
>      struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
>      unsigned int irq;
>  #define GIC_INVALID_LR         (uint8_t)~0
>      uint8_t lr;
>      uint8_t priority;
> +    uint8_t lpi_priority;       /* Caches the priority if this is an LPI. */
>      /* inflight is used to append instances of pending_irq to
>       * vgic.inflight_irqs */
>      struct list_head inflight;
> @@ -136,6 +143,7 @@ struct vgic_ops {
>      bool (*emulate_reg)(struct cpu_user_regs *regs, union hsr hsr);
>      /* lookup the struct pending_irq for a given LPI interrupt */
>      struct pending_irq *(*lpi_to_pending)(struct domain *d, unsigned int vlpi);
> +    int (*lpi_get_priority)(struct domain *d, uint32_t vlpi);
>      /* Maximum number of vCPU supported */
>      const unsigned int max_vcpus;
>  };
> -- 
> 2.9.0
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

