[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v7 21/28] xen/arm: ITS: Add GICR register emulation



Hi Vijay,

On 18/09/15 14:09, vijay.kilari@xxxxxxxxx wrote:
> diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
> index 4285af3..37c2105 100644
> --- a/xen/arch/arm/vgic-v3-its.c
> +++ b/xen/arch/arm/vgic-v3-its.c
> @@ -29,6 +29,7 @@
>  #include <asm/atomic.h>
>  #include <asm/gic_v3_defs.h>
>  #include <asm/gic.h>
> +#include <asm/gic-its.h>
>  #include <asm/vgic.h>
>  #include <asm/gic-its.h>
>  #include <asm/vits.h>
> @@ -852,6 +853,7 @@ int vits_domain_init(struct domain *d)
>      vits = d->arch.vgic.vits;
>  
>      spin_lock_init(&vits->lock);
> +    spin_lock_init(&vits->prop_lock);

Anything related to the LPI property table should have been moved into
gic-v3.c and protected with a proper check.

>  
>      vits->collections = xzalloc_array(struct its_collection,
>                                        vits_get_max_collections(d));
> @@ -877,6 +879,8 @@ int vits_domain_init(struct domain *d)
>  
>  void vits_domain_free(struct domain *d)
>  {
> +   free_xenheap_pages(d->arch.vgic.vits->prop_page,
> +                      get_order_from_bytes(d->arch.vgic.vits->prop_size));

Ditto.

>     xfree(d->arch.vgic.vits->collections);
>     xfree(d->arch.vgic.vits);
>  }
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index d99dedb..c631c7b 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -29,7 +29,10 @@
>  #include <asm/current.h>
>  #include <asm/mmio.h>
>  #include <asm/gic_v3_defs.h>
> +#include <asm/gic.h>
> +#include <asm/gic-its.h>
>  #include <asm/vgic.h>
> +#include <asm/vits.h>
>  
>  /* GICD_PIDRn register values for ARM implementations */
>  #define GICV3_GICD_PIDR0  0x92
> @@ -106,23 +109,269 @@ static struct vcpu *vgic_v3_get_target_vcpu(struct 
> vcpu *v, unsigned int irq)
>      return v_target;
>  }
>  
> +static void vits_disable_lpi(struct vcpu *v, uint32_t vlpi)

There is nothing vITS specific in this code. I was expecting you to
properly rename all the vits functions you moved into the GICv3 code.

> +{
> +    struct pending_irq *p;
> +    struct vcpu *v_target = v->domain->vcpu[0];
> +
> +    p = irq_to_pending(v_target, vlpi);
> +    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
> +    {
> +        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
> +        gic_remove_from_queues(v_target, vlpi);
> +    }
> +}
> +
> +static void vits_enable_lpi(struct vcpu *v, uint32_t vlpi, uint8_t priority)

The parameter priority is never used. Please drop it.

> +{
> +    struct pending_irq *p;
> +    unsigned long flags;
> +    struct vcpu *v_target = v->domain->vcpu[0];
> +
> +    /*
> +     * We don't have vlpi to plpi mapping and hence we cannot
> +     * have target on which corresponding vlpi is enabled.
> +     * So for now we are always injecting vlpi on vcpu0.
> +     * (See vgic_vcpu_inject_lpi() function) and so we get pending_irq
> +     * structure on vcpu0.
> +     * TODO: Get correct target vcpu
> +     */

I would much prefer to see get_vcpu_target implemented for LPIs which
will always return VCPU0 rather than open-coding it everywhere.

It will help later when we will properly support LPIs on other VCPU than
VCPU0.

> +    p = irq_to_pending(v_target, vlpi);
> +
> +    set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
> +
> +    spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
> +
> +    if ( !list_empty(&p->inflight) &&
> +         !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
> +        gic_raise_guest_irq(v_target, vlpi, p->priority);
> +
> +    spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
> +}
> +
> +static int vgic_v3_gits_lpi_mmio_read(struct vcpu *v, mmio_info_t *info)
> +{
> +    uint32_t offset;
> +    void *addr;
> +    uint64_t val;
> +    unsigned long flags;
> +    struct vgic_its *vits = v->domain->arch.vgic.vits;
> +    struct hsr_dabt dabt = info->dabt;
> +    struct cpu_user_regs *regs = guest_cpu_user_regs();
> +    register_t *r = select_user_reg(regs, dabt.reg);
> +
> +    offset = info->gpa - (vits->propbase & GICR_PROPBASER_PA_MASK);
> +    addr = (void *)((u8*)vits->prop_page + offset);

Even if the current emulation doesn't allow to modify the LPI property
table, you still have to protect access to prop_page and propbase.

I would protect those fields with the prop_lock rather than the
vits->lock. It would avoid having to take 2 locks every time.

> +
> +    spin_lock_irqsave(&vits->prop_lock, flags);
> +    switch (dabt.size)
> +    {
> +    case DABT_DOUBLE_WORD:
> +        val = *((u64*)addr);

I'm pretty sure you need a space between the type and the *. I.e
u64 *

> +        *r = val;

There is no point in having a temporary variable. You can directly
assign the value to *r. I.e

*r = *((u64 *)addr);

The remark is valid for all the other cases in this switch.

> +        break;

[...]

> +static void vgic_v3_gits_update_lpis_state(struct vcpu *v, uint32_t vid,
> +                                           uint32_t size)
> +{
> +    struct vgic_its *vits = v->domain->arch.vgic.vits;
> +    uint32_t i;
> +    uint8_t cfg, *p;
> +    bool_t enable;
> +

I would add an ASSERT(spin_is_locked(&vits->prop_lock));

> +    p = ((u8*)vits->prop_page + vid);
> +
> +    for ( i = 0 ; i < size; i++ )
> +    {
> +        cfg = *p;
> +        enable = cfg & LPI_PROP_ENABLED;
> +
> +        /* LPIs start from 8192, So add 8192 to point to correct LPI number 
> */
> +        if ( !enable )
> +            vits_enable_lpi(v, vid + FIRST_GIC_LPI, (cfg & 
> LPI_PRIORITY_MASK));
> +        else
> +            vits_disable_lpi(v, vid + FIRST_GIC_LPI);
> +
> +        p++;
> +        vid++;
> +    }
> +}
> +
> +static int vgic_v3_gits_lpi_mmio_write(struct vcpu *v, mmio_info_t *info)
> +{
> +    uint32_t offset;
> +    uint8_t cfg, *p, *val, i, iter;
> +    bool_t enable;
> +    unsigned long flags;
> +    struct vgic_its *vits = v->domain->arch.vgic.vits;
> +    struct hsr_dabt dabt = info->dabt;
> +    struct cpu_user_regs *regs = guest_cpu_user_regs();
> +    register_t *r = select_user_reg(regs, dabt.reg);
> +
> +    offset = info->gpa - (vits->propbase & GICR_PROPBASER_PA_MASK);

Same remark as previously for propbase. It has to be protected by a lock.

> +
> +    switch (dabt.size)
> +    {
> +    case DABT_DOUBLE_WORD:
> +        iter = 8;
> +        break;
> +    case DABT_WORD:
> +        iter = 4;
> +        break;
> +    case DABT_HALF_WORD:
> +        iter = 2;
> +        break;
> +    default:

dabt.size is an enum, you should enumerate all the cases rather than
using the default label for the last item.

It would help the compiler to catch any missing item and make easier to
understand what is the last case.

Although, all this switch could be replaced by a single line:

iter = 1 << dabt.size;

> +        iter = 1;
> +    }
> +    spin_lock_irqsave(&vits->prop_lock, flags);
> +
> +    p = ((u8*)vits->prop_page + offset);
> +    val = (u8*)r;
> +
> +    for ( i = 0 ; i < iter; i++ )
> +    {
> +        cfg = *p;
> +        enable = (cfg & *val) & LPI_PROP_ENABLED;

You could have directly used *p rather than introducing cfg.

> +
> +        if ( !enable )
> +            vits_enable_lpi(v, offset + FIRST_GIC_LPI, (*val & 
> LPI_PRIORITY_MASK));
> +        else
> +            vits_disable_lpi(v, offset + FIRST_GIC_LPI);
> +
> +        /* Update virtual prop page */
> +        *p = (*val & 0xff);

The mask is not necessary.

> +        val++;
> +        p++;
> +        offset++;
> +    }
> +
> +    spin_unlock_irqrestore(&vits->prop_lock, flags);
> +
> +    return 1;
> +}
> +
> +static const struct mmio_handler_ops vgic_gits_lpi_mmio_handler = {
> +    .read_handler  = vgic_v3_gits_lpi_mmio_read,
> +    .write_handler = vgic_v3_gits_lpi_mmio_write,
> +};
> +
> +static int vits_map_lpi_prop(struct vcpu *v)
> +{
> +    struct vgic_its *vits = v->domain->arch.vgic.vits;
> +    paddr_t gaddr, addr;
> +    unsigned long mfn, flags;
> +    uint32_t id_bits, vgic_id_bits;
> +    int i;
> +
> +    gaddr = vits->propbase & GICR_PROPBASER_PA_MASK;
> +    id_bits = ((vits->propbase & GICR_PROPBASER_IDBITS_MASK) + 1);
> +
> +    vgic_id_bits = get_count_order(v->domain->arch.vgic.nr_lpis +
> +                                   FIRST_GIC_LPI);
> +    /*
> +     * Here we limit the size of LPI property table to the number of LPIs
> +     * that domain supports.
> +     */
> +    if ( id_bits > vgic_id_bits )
> +        id_bits = vgic_id_bits;

As said on v4/v5, you are allowing the possibility to have a smaller
property table than the effective number of LPIs.

An ASSERT in vgic_v3_get_irq_priority (patch #25) doesn't ensure the
validity of the size of the property table provided by the guest. This
will surely crash Xen in debug mode, and who knows what will happen in
production mode.

We had a discussion about what to do on v5 and I was expecting from you
at least to put a TODO/comment in the code explaining what needs to be
done in order to fix the problem.

You could also have worked around the problem by always allocating the
prop_page using vgic_id_bits, no matter the size provided by the caller.

> +
> +    vits->prop_size = 1 << id_bits;

The field prop_size should only be updated if we succeed in allocating
the prop_page.

> +
> +    /*
> +     * Allocate Virtual LPI Property table.
> +     * TODO: To re-use guest property table
> +     */
> +    vits->prop_page = 
> alloc_xenheap_pages(get_order_from_bytes(vits->prop_size),
> +                                          0);
> +    if ( !vits->prop_page )
> +    {
> +        printk(XENLOG_G_ERR
> +               "d%d: vITS: Fail to allocate LPI Prop page\n",
> +               v->domain->domain_id);
> +        return 0;
> +    }
> +
> +    addr = gaddr;
> +
> +    spin_lock_irqsave(&vits->prop_lock, flags);

On a previous version I clearly explained the consequences of syncing the
LPI property table. It's now getting worse with this lock because you
disable interrupts.

Syncing the LPI property table may mean enabling an unknown amount
of LPIs. This could be very slow and you block the physical CPU
from receiving any interrupt.

While this may be acceptable (?) for DOM0, it is unacceptable for a
guest. A valid guest could potentially block a CPU for a really long
time due to the number of LPIs.

> +    /* Copy only configuration of supported LPIs (vgic.nr_lpis) */
> +    for ( i = 0; i < v->domain->arch.vgic.nr_lpis / PAGE_SIZE; i++ )

As said above, the size of the property table may be smaller than the
number of LPIs supported. So you will copy garbage.

Also please add a comment to explain why it's fine to assume that
nr_lpis is page aligned.

> +    {
> +        vgic_access_guest_memory(v->domain, addr,
> +            (void *)(vits->prop_page + (i * PAGE_SIZE)), PAGE_SIZE, 0);
> +
> +        vgic_v3_gits_update_lpis_state(v, (i * PAGE_SIZE), PAGE_SIZE);
> +        addr += PAGE_SIZE;
> +    }
> +    spin_unlock_irqrestore(&vits->prop_lock, flags);
> +
> +    /*
> +     * Each re-distributor shares a common LPI configuration table
> +     * So one set of mmio handlers to manage configuration table is enough
> +     *
> +     * Copy and unmap LPI property table.
> +     */
> +    addr = gaddr;
> +    for ( i = 0; i <  vits->prop_size / PAGE_SIZE; i++ )

Same remark as nr_lpis here.

> +    {
> +        mfn = gmfn_to_mfn(v->domain, paddr_to_pfn(addr));
> +        if ( unlikely(!mfn_valid(mfn)) )
> +        {
> +            printk(XENLOG_G_ERR
> +                   "vITS: Invalid propbaser address for domain %d\n",
> +                   v->domain->domain_id);

It would have been nice to have the problematic address in the commit
message.

> +            return 0;
> +        }
> +        guest_physmap_remove_page(v->domain, paddr_to_pfn(addr), mfn, 0);
> +        addr += PAGE_SIZE;
> +    }
> +
> +    /* Register mmio handlers for this region */
> +    register_mmio_handler(v->domain, &vgic_gits_lpi_mmio_handler,
> +                          gaddr, vits->prop_size);
> +
> +    return 1;
> +}
> +
>  static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>                                           uint32_t gicr_reg)
>  {
>      struct hsr_dabt dabt = info->dabt;
>      struct cpu_user_regs *regs = guest_cpu_user_regs();
>      register_t *r = select_user_reg(regs, dabt.reg);
> +    struct vgic_its *vits;
>  
>      switch ( gicr_reg )
>      {
>      case GICR_CTLR:
> -        /* We have not implemented LPI's, read zero */
> -        goto read_as_zero_32;
> +        if ( dabt.size != DABT_WORD ) goto bad_width;
> +        vgic_lock(v);
> +        *r = vgic_reg32_read(v->domain->arch.vgic.gicr_ctlr, info);

The re-distributor is per-VCPU. Therefore, gicr_ctlr should be in arch_vcpu.

> +        vgic_unlock(v);
> +        return 1;
>      case GICR_IIDR:
>          if ( dabt.size != DABT_WORD ) goto bad_width;
>          *r = vgic_reg32_read(GICV3_GICR_IIDR_VAL, info);
>          return 1;
>      case GICR_TYPER:
> +    case GICR_TYPER + 4:
>      {
>          uint64_t typer, aff;
>  
> @@ -137,6 +386,12 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, 
> mmio_info_t *info,
>          if ( v->arch.vgic.flags & VGIC_V3_RDIST_LAST )
>              typer |= GICR_TYPER_LAST;
>  
> +        /* Set Physical LPIs support */
> +        if ( vgic_is_lpi_supported(v->domain) )
> +            typer |= GICR_TYPER_PLPIS;
> +        /* GITS_TYPER.PTA is 0. Provide vcpu number as target address */

Setting the processor number in GICR_TYPER is not related to
GITS_TYPER.PTA. It could also be used for GICv2 compatibility.

So please drop "GITS_TYPER.PTA is 0.".

> +        typer |= (v->vcpu_id << GICR_TYPER_PROCESSOR_SHIFT);
> +
>          *r = vgic_reg64_read(typer, info);
>  
>          return 1;
> @@ -154,10 +409,29 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu 
> *v, mmio_info_t *info,
>          /* WO. Read as zero */
>          goto read_as_zero_64;
>      case GICR_PROPBASER:
> -        /* LPI's not implemented */
> +        if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;

AFAICT, this field can be accessed using 32-bit access.

> +        if ( vgic_is_lpi_supported(v->domain) )
> +        {
> +            vits = v->domain->arch.vgic.vits;
> +            vgic_lock(v);

Based on my suggestion above, I would lock propbase with prop_lock to
avoid having to take multiple locks.

> +            *r = vgic_reg64_read(vits->propbase, info);
> +            vgic_unlock(v);
> +            return 1;
> +        }

Can you invert the condition to drop one layer of indentation (same for
all similar code within this patch).

I.e

if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;
if ( !vgic_is_lpi_supported(v->domain) )
  goto read_as_zero;

/* Emulation of the register */

>          goto read_as_zero_64;
>      case GICR_PENDBASER:
> -        /* LPI's not implemented */
> +        if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;


AFAICT, this field can be accessed using 32-bit access.

> +        if ( vgic_is_lpi_supported(v->domain) )
> +        {
> +            uint64_t val;
> +
> +            vgic_lock(v);
> +            val = vgic_reg64_read(v->arch.vgic.pendbase, info);
> +            /* PTZ field is WO */

The more important reason is that PTZ is RAZ, not WO.

> +            *r = val & ~GICR_PENDBASER_PTZ_MASK;
> +            vgic_unlock(v);
> +            return 1;
> +        }
>          goto read_as_zero_64;
>      case GICR_INVLPIR:
>          /* WO. Read as zero */
> @@ -232,7 +506,19 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu 
> *v, mmio_info_t *info,
>      switch ( gicr_reg )
>      {
>      case GICR_CTLR:
> -        /* LPI's not implemented */
> +        if ( dabt.size != DABT_WORD ) goto bad_width;
> +        if ( vgic_is_lpi_supported(v->domain) )
> +        {
> +            /*
> +             * Enable LPI's for ITS. Direct injection of LPI
> +             * by writing to GICR_{SET,CLR}LPIR is not supported.
> +             */
> +            vgic_lock(v);
> +            vgic_reg32_write(&v->domain->arch.vgic.gicr_ctlr,
> +                             (*r & GICR_CTLR_ENABLE_LPIS), info);
> +            vgic_unlock(v);
> +            return 1;
> +        }
>          goto write_ignore_32;
>      case GICR_IIDR:
>          /* RO */
> @@ -253,10 +539,42 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu 
> *v, mmio_info_t *info,
>          /* LPI is not implemented */
>          goto write_ignore_64;
>      case GICR_PROPBASER:
> -        /* LPI is not implemented */
> +        if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;

Any reason to not support 32-bit access?

> +        if ( vgic_is_lpi_supported(v->domain) )
> +        {
> +            vgic_lock(v);

See my remark above about locking

> +            /*
> +             * LPI configuration tables are shared across cpus. Should be 
> same.
> +             *
> +             * Allow updating on propbase only once with below check.
> +             * TODO: Manage change in property table.
> +             */
> +            if ( v->domain->arch.vgic.vits->propbase != 0 )
> +            {
> +                printk(XENLOG_G_WARNING
> +                       "%pv: vGICR: Updating LPI propbase is not allowed\n", 
> v);
> +                vgic_unlock(v);

As said on v5, AFAICT this warning will be printed even in valid cases,
i.e. on every VCPU except VCPU0.

This warning should only be printed when necessary, to avoid
questions from users about false positives.

If you disagree please explain why.

> +                return 1;
> +            }
> +            /*
> +             * TODO: As per spec, updating GICR_PROPBASER when 
> GICR_CTLR.EnableLPIs = 1

This line is too long. If you invert the condition, it would drop one
layer of indentation.

> +             * is unpredictable. Handle this scenario
> +             */
> +            vgic_reg64_write(&v->domain->arch.vgic.vits->propbase, *r, info);
> +            vgic_unlock(v);
> +            return vits_map_lpi_prop(v);
> +        }
>          goto write_ignore_64;
>      case GICR_PENDBASER:
> -        /* LPI is not implemented */
> +        if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;

Any reason to not support 32-bit access?

> +        if ( vgic_is_lpi_supported(v->domain) )
> +        {
> +            /* Just hold pendbaser value for guest read */
> +            vgic_lock(v);
> +            vgic_reg64_write(&v->arch.vgic.pendbase, *r, info);
> +            vgic_unlock(v);
> +            return 1;
> +        }
>          goto write_ignore_64;
>      case GICR_INVLPIR:
>          /* LPI is not implemented */

[...]

> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 0f6f9d8..6a5c6a0 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -116,6 +116,7 @@ struct arch_domain
>          uint32_t rdist_stride;              /* Re-Distributor stride */
>          /* Virtual ITS */
>          struct vgic_its *vits;
> +        uint32_t gicr_ctlr;

GICR_CTLR is per-redistributor i.e per-VCPU.

>  #endif
>      } vgic;
>  
> @@ -249,6 +250,8 @@ struct arch_vcpu
>  
>          /* GICv3: redistributor base and flags for this vCPU */
>          paddr_t rdist_base;
> +        /* GICv3-ITS: LPI pending table for this vCPU */
> +        paddr_t pendbase;

pendbase stores a 64-bit register so it should be uint64_t.

>  #define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
>          uint8_t flags;
>      } vgic;
> diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
> index 7936256..8f3d0fe 100644
> --- a/xen/include/asm-arm/gic-its.h
> +++ b/xen/include/asm-arm/gic-its.h
> @@ -114,6 +114,7 @@
>  
>  #define LPI_PROP_ENABLED                (1 << 0)
>  #define LPI_PROP_GROUP1                 (1 << 1)
> +#define LPI_PRIORITY_MASK               (0xfc)
>  
>  /*
>   * Collection structure - just an ID, and a redistributor address to
> diff --git a/xen/include/asm-arm/gic_v3_defs.h 
> b/xen/include/asm-arm/gic_v3_defs.h
> index f819589..5d39033 100644
> --- a/xen/include/asm-arm/gic_v3_defs.h
> +++ b/xen/include/asm-arm/gic_v3_defs.h
> @@ -126,14 +126,17 @@
>  #define GICR_PROPBASER_WaWb              (5U << 7)
>  #define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
>  #define GICR_PROPBASER_IDBITS_MASK       (0x1f)
> +#define GICR_PROPBASER_PA_MASK           (0xfffffffff000UL)
>  #define GICR_TYPER_PLPIS             (1U << 0)
>  #define GICR_TYPER_VLPIS             (1U << 1)
>  #define GICR_TYPER_LAST              (1U << 4)
> +#define GICR_TYPER_PROCESSOR_SHIFT   (8)
>  
>  #define GICR_PENDBASER_InnerShareable    (1U << 10)
>  #define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
>  #define GICR_PENDBASER_nC                (1U << 7)
>  #define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
> +#define GICR_PENDBASER_PTZ_MASK          (1UL << 62)
>  
>  #define DEFAULT_PMR_VALUE            0xff
>  
> diff --git a/xen/include/asm-arm/vits.h b/xen/include/asm-arm/vits.h
> index d864c1e..e1188f7 100644
> --- a/xen/include/asm-arm/vits.h
> +++ b/xen/include/asm-arm/vits.h
> @@ -36,6 +36,14 @@ struct vgic_its
>     atomic_t cmd_read;
>     /* Command queue size */
>     unsigned long cmd_qsize;
> +   /* LPI propbase */
> +   paddr_t propbase;

GICR_PROPBASER is a register so uint64_t

> +   /* Virtual LPI property table */
> +   void *prop_page;
> +   /* Virtual LPI property size */
> +   uint32_t prop_size;
> +   /* spinlock to protect lpi property table */
> +   spinlock_t prop_lock;

All those fields are not vITS specific but GICv3 specific. This should
go with the other GICv3 fields.

>     /* ITS mmio physical base */
>     paddr_t gits_base;
>     /* ITS mmio physical size */
> 

Regards,

-- 
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.