
[Xen-devel] [PATCH v6 24/31] xen/arm: ITS: Add GICR register emulation



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>

Emulate the LPI-related GICR registers: GICR_CTLR, GICR_TYPER,
GICR_PROPBASER and GICR_PENDBASER now reflect LPI support, and guest
accesses to the LPI configuration table are trapped and emulated.

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
v6: - Moved LPI handling code to vgic-v3.c
    - Parse the LPI property table on GICR_PROPBASER update
    - Use vgic_is_lpi_supported()
v5: - Handled all access sizes to the LPI configuration table
    - Renamed vits_unmap_lpi_prop to vits_map_lpi_prop
v4: - Added LPI configuration table emulation
    - Renamed functions in line with the vits naming convention
    - Copied the guest LPI configuration table into Xen
---
 xen/arch/arm/vgic-v3-its.c        |    4 +
 xen/arch/arm/vgic-v3.c            |  354 ++++++++++++++++++++++++++++++++++++-
 xen/include/asm-arm/domain.h      |    3 +
 xen/include/asm-arm/gic-its.h     |    9 +
 xen/include/asm-arm/gic_v3_defs.h |    4 +
 5 files changed, 366 insertions(+), 8 deletions(-)
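
For reference while reviewing the handlers below: each LPI is described by a
single byte in the LPI property table, with bit 0 as the enable bit and bits
[7:2] as the priority. A minimal decode of that byte, matching the
LPI_PROP_ENABLED and LPI_PRIORITY_MASK definitions this patch uses
(standalone sketch only, not part of the patch; the helper name is made up):

/* Illustrative only -- mirrors the per-LPI config byte layout used below. */
#include <stdbool.h>
#include <stdint.h>

#define LPI_PROP_ENABLED    (1 << 0)   /* bit 0: LPI enabled */
#define LPI_PRIORITY_MASK   (0xfc)     /* bits [7:2]: priority */

/* Hypothetical helper: decode one LPI configuration table entry. */
static inline void lpi_cfg_decode(uint8_t cfg, bool *enabled, uint8_t *prio)
{
    *enabled = (cfg & LPI_PROP_ENABLED) != 0;
    *prio = cfg & LPI_PRIORITY_MASK;
}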

diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index c384ecf..7c0375a 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -28,6 +28,7 @@
 #include <asm/io.h>
 #include <asm/gic_v3_defs.h>
 #include <asm/gic.h>
+#include <asm/gic-its.h>
 #include <asm/vgic.h>
 #include <asm/gic-its.h>
 #include <asm/atomic.h>
@@ -908,6 +909,7 @@ int vits_domain_init(struct domain *d)
     vits = d->arch.vgic.vits;
 
     spin_lock_init(&vits->lock);
+    spin_lock_init(&vits->prop_lock);
 
     vits->collections = xzalloc_array(struct its_collection,
                                       vits_get_max_collections(d));
@@ -933,6 +935,8 @@ int vits_domain_init(struct domain *d)
 
 void vits_domain_free(struct domain *d)
 {
+   free_xenheap_pages(d->arch.vgic.vits->prop_page,
+                      get_order_from_bytes(d->arch.vgic.vits->prop_size));
    xfree(d->arch.vgic.vits->collections);
    xfree(d->arch.vgic.vits);
 }
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 4caac5e..dd7dc32 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -29,6 +29,8 @@
 #include <asm/current.h>
 #include <asm/mmio.h>
 #include <asm/gic_v3_defs.h>
+#include <asm/gic.h>
+#include <asm/gic-its.h>
 #include <asm/vgic.h>
 
 /* GICD_PIDRn register values for ARM implementations */
@@ -105,18 +107,287 @@ static struct vcpu *vgic_v3_get_target_vcpu(struct vcpu *v, unsigned int irq)
     return v_target;
 }
 
+static void vits_disable_lpi(struct vcpu *v, uint32_t vlpi)
+{
+    struct pending_irq *p;
+    struct vcpu *v_target = v->domain->vcpu[0];
+
+    p = irq_to_pending(v_target, vlpi);
+    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
+    {
+        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+        gic_remove_from_queues(v_target, vlpi);
+    }
+}
+
+static void vits_enable_lpi(struct vcpu *v, uint32_t vlpi, uint8_t priority)
+{
+    struct pending_irq *p;
+    unsigned long flags;
+    struct vcpu *v_target = v->domain->vcpu[0];
+
+    /*
+     * There is no vLPI to pLPI mapping yet, so the vCPU on which a
+     * given vLPI is enabled cannot be determined. For now all vLPIs
+     * are injected on vcpu0 (see vgic_vcpu_inject_lpi()), so the
+     * pending_irq structure is also looked up on vcpu0.
+     *
+     * TODO: Get the correct target vCPU.
+     */
+    p = irq_to_pending(v_target, vlpi);
+
+    set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+
+    spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
+
+    if ( !list_empty(&p->inflight) &&
+         !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+        gic_raise_guest_irq(v_target, vlpi, p->priority);
+
+    spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
+}
+
+static int vgic_v3_gits_lpi_mmio_read(struct vcpu *v, mmio_info_t *info)
+{
+    uint32_t offset;
+    void *addr;
+    uint64_t val;
+    unsigned long flags;
+    struct vgic_its *vits = v->domain->arch.vgic.vits;
+    struct hsr_dabt dabt = info->dabt;
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    register_t *r = select_user_reg(regs, dabt.reg);
+
+    offset = info->gpa - (vits->propbase & GICR_PROPBASER_PA_MASK);
+    if ( offset >= vits->prop_size )
+    {
+        dprintk(XENLOG_G_ERR, "%pv: vITS: LPI property table out of range\n",
+                current);
+        return 1;
+    }
+
+    /* Ignore if not an LPI. */
+    if ( offset < FIRST_GIC_LPI )
+    {
+        *r = 0;
+        return 1;
+    }
+
+    addr = (void *)((u8*)vits->prop_page + offset);
+    spin_lock_irqsave(&vits->prop_lock, flags);
+    switch (dabt.size)
+    {
+    case DABT_DOUBLE_WORD:
+        val = *((u64*)addr);
+        *r = val;
+        break;
+    case DABT_WORD:
+        val = *((u32*)addr);
+        *r = (u32)val;
+        break;
+    case DABT_HALF_WORD:
+        val = *((u16*)addr);
+        *r = (u16)val;
+        break;
+    default:
+        val = *((u8*)addr);
+        *r = (u8)val;
+    }
+    spin_unlock_irqrestore(&vits->prop_lock, flags);
+
+    return 1;
+}
+
+static void vgic_v3_gits_update_lpis_state(struct vcpu *v, uint32_t vid,
+                                           uint32_t size)
+{
+    struct vgic_its *vits = v->domain->arch.vgic.vits;
+    uint32_t i;
+    uint8_t cfg, *p;
+    bool_t enable;
+
+    p = ((u8*)vits->prop_page + vid);
+
+    for ( i = 0 ; i < size; i++ )
+    {
+        cfg = *p;
+        enable = cfg & LPI_PROP_ENABLED;
+
+        if ( enable )
+            vits_enable_lpi(v, vid, (cfg & LPI_PRIORITY_MASK));
+        else
+            vits_disable_lpi(v, vid);
+
+        p++;
+        vid++;
+    }
+}
+
+static int vgic_v3_gits_lpi_mmio_write(struct vcpu *v, mmio_info_t *info)
+{
+    uint32_t offset;
+    uint8_t cfg, *p, *val, i, iter;
+    bool_t enable;
+    unsigned long flags;
+    struct vgic_its *vits = v->domain->arch.vgic.vits;
+    struct hsr_dabt dabt = info->dabt;
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    register_t *r = select_user_reg(regs, dabt.reg);
+
+    offset = info->gpa - (vits->propbase & GICR_PROPBASER_PA_MASK);
+    if ( offset >= vits->prop_size )
+    {
+        dprintk(XENLOG_G_ERR, "%pv: vITS: LPI property table out of range\n",
+                current);
+        return 1;
+    }
+
+    /* Ignore if not an LPI. */
+    if ( offset < FIRST_GIC_LPI )
+        return 1;
+
+    switch (dabt.size)
+    {
+    case DABT_DOUBLE_WORD:
+        iter = 8;
+        break;
+    case DABT_WORD:
+        iter = 4;
+        break;
+    case DABT_HALF_WORD:
+        iter = 2;
+        break;
+    default:
+        iter = 1;
+    }
+    spin_lock_irqsave(&vits->prop_lock, flags);
+
+    p = ((u8*)vits->prop_page + offset);
+    val = (u8*)r;
+
+    for ( i = 0 ; i < iter; i++ )
+    {
+        cfg = *p;
+        enable = *val & LPI_PROP_ENABLED;
+
+        if ( enable )
+            vits_enable_lpi(v, offset, (*val & LPI_PRIORITY_MASK));
+        else if ( cfg & LPI_PROP_ENABLED )
+            vits_disable_lpi(v, offset);
+
+        /* Update virtual prop page */
+        *p = (*val & 0xff);
+        val++;
+        p++;
+        offset++;
+    }
+
+    spin_unlock_irqrestore(&vits->prop_lock, flags);
+
+    return 1;
+}
+
+static const struct mmio_handler_ops vgic_gits_lpi_mmio_handler = {
+    .read_handler  = vgic_v3_gits_lpi_mmio_read,
+    .write_handler = vgic_v3_gits_lpi_mmio_write,
+};
+
+static int vits_map_lpi_prop(struct vcpu *v)
+{
+    struct vgic_its *vits = v->domain->arch.vgic.vits;
+    paddr_t gaddr, addr;
+    unsigned long mfn, flags;
+    uint32_t lpi_size, id_bits;
+    int i;
+
+    gaddr = vits->propbase & GICR_PROPBASER_PA_MASK;
+    id_bits = ((vits->propbase & GICR_PROPBASER_IDBITS_MASK) + 1);
+
+    if ( id_bits > v->domain->arch.vgic.id_bits )
+        id_bits = v->domain->arch.vgic.id_bits;
+
+    lpi_size = 1UL << id_bits;
+    if ( lpi_size < FIRST_GIC_LPI )
+    {
+        dprintk(XENLOG_G_ERR,
+                "d%"PRIu16": vITS: LPI property table does not hold LPI 
config\n",
+                v->domain->domain_id);
+        return 0;
+    }
+
+    vits->prop_size = lpi_size;
+    /* Allocate Virtual LPI Property table */
+    /* TODO: Re-use the guest property table instead? */
+    vits->prop_page = alloc_xenheap_pages(get_order_from_bytes(lpi_size), 0);
+    if ( !vits->prop_page )
+    {
+        dprintk(XENLOG_G_ERR,
+                "d%"PRIu16": vITS: Fail to allocate LPI Prop page\n",
+                v->domain->domain_id);
+        return 0;
+    }
+
+    addr = gaddr;
+
+    /*
+     * LPIs start at index 8192 of the LPI property table,
+     * so only (lpi_size - 8192) bytes of guest data are copied.
+     */
+    spin_lock_irqsave(&vits->prop_lock, flags);
+    for ( i = 0; i < (lpi_size - FIRST_GIC_LPI) / PAGE_SIZE; i++ )
+    {
+        vits_access_guest_table(v->domain, addr,
+            (void *)(vits->prop_page + FIRST_GIC_LPI + (i * PAGE_SIZE)),
+            PAGE_SIZE, 0);
+
+        vgic_v3_gits_update_lpis_state(v,  FIRST_GIC_LPI + (i * PAGE_SIZE),
+                                       PAGE_SIZE);
+        addr += PAGE_SIZE;
+    }
+    spin_unlock_irqrestore(&vits->prop_lock, flags);
+
+    /*
+     * All re-distributors share a common LPI configuration table,
+     * so one set of MMIO handlers for the table is enough.
+     */
+    addr = gaddr;
+    for ( i = 0; i < lpi_size / PAGE_SIZE; i++ )
+    {
+        mfn = gmfn_to_mfn(v->domain, paddr_to_pfn(addr));
+        if ( unlikely(!mfn_valid(mfn)) )
+        {
+            dprintk(XENLOG_G_ERR,
+                    "vITS: Invalid propbaser address for domain %"PRIu16"\n",
+                    v->domain->domain_id);
+            return 0;
+        }
+        guest_physmap_remove_page(v->domain, paddr_to_pfn(addr), mfn, 0);
+        addr += PAGE_SIZE;
+    }
+
+    /* Register mmio handlers for this region */
+    register_mmio_handler(v->domain, &vgic_gits_lpi_mmio_handler,
+                          gaddr, lpi_size);
+
+    return 1;
+}
+
 static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
                                          uint32_t gicr_reg)
 {
     struct hsr_dabt dabt = info->dabt;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     register_t *r = select_user_reg(regs, dabt.reg);
+    struct vgic_its *vits;
 
     switch ( gicr_reg )
     {
     case GICR_CTLR:
-        /* We have not implemented LPI's, read zero */
-        goto read_as_zero_32;
+        if ( dabt.size != DABT_WORD ) goto bad_width;
+        vgic_lock(v);
+        *r = vgic_reg32_read(v->domain->arch.vgic.gicr_ctlr, info);
+        vgic_unlock(v);
+        return 1;
     case GICR_IIDR:
         if ( dabt.size != DABT_WORD ) goto bad_width;
         *r = vgic_reg32_read(GICV3_GICR_IIDR_VAL, info);
@@ -137,6 +408,12 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
         if ( v->arch.vgic.flags & VGIC_V3_RDIST_LAST )
             typer |= GICR_TYPER_LAST;
 
+        /* Set Physical LPIs support */
+        if ( vgic_is_lpi_supported(v->domain) )
+            typer |= GICR_TYPER_PLPIS;
+        /* GITS_TYPER.PTA is 0. Provide vcpu number as target address */
+        typer |= (v->vcpu_id << GICR_TYPER_PROCESSOR_SHIFT);
+
         if ( dabt.size == DABT_DOUBLE_WORD )
             *r = vgic_reg64_read(typer, info);
         else
@@ -157,10 +434,29 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
         /* WO. Read as zero */
         goto read_as_zero_64;
     case GICR_PROPBASER:
-        /* LPI's not implemented */
+        if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;
+        if ( vgic_is_lpi_supported(v->domain) )
+        {
+            vits = v->domain->arch.vgic.vits;
+            vgic_lock(v);
+            *r = vgic_reg64_read(vits->propbase, info);
+            vgic_unlock(v);
+            return 1;
+        }
         goto read_as_zero_64;
     case GICR_PENDBASER:
-        /* LPI's not implemented */
+        if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;
+        if ( vgic_is_lpi_supported(v->domain) )
+        {
+            uint64_t val;
+
+            vgic_lock(v);
+            val = vgic_reg64_read(v->arch.vgic.pendbase, info);
+            /* PTZ field is WO */
+            *r = val & ~GICR_PENDBASER_PTZ_MASK;
+            vgic_unlock(v);
+            return 1;
+        }
         goto read_as_zero_64;
     case GICR_INVLPIR:
         /* WO. Read as zero */
@@ -235,7 +531,19 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
     switch ( gicr_reg )
     {
     case GICR_CTLR:
-        /* LPI's not implemented */
+        if ( dabt.size != DABT_WORD ) goto bad_width;
+        if ( vgic_is_lpi_supported(v->domain) )
+        {
+            /*
+             * Enable LPIs for the ITS. Direct injection of LPIs
+             * by writing to GICR_{SET,CLR}LPIR is not supported.
+             */
+            vgic_lock(v);
+            vgic_reg32_write(&v->domain->arch.vgic.gicr_ctlr,
+                             (*r & GICR_CTLR_ENABLE_LPIS), info);
+            vgic_unlock(v);
+            return 1;
+        }
         goto write_ignore_32;
     case GICR_IIDR:
         /* RO */
@@ -256,10 +564,37 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
         /* LPI is not implemented */
         goto write_ignore_64;
     case GICR_PROPBASER:
-        /* LPI is not implemented */
+        if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;
+        if ( vgic_is_lpi_supported(v->domain) )
+        {
+            vgic_lock(v);
+            /* LPI configuration tables are shared across CPUs and should be the same. */
+            /*
+             * Allow the propbase to be written only once (checked below).
+             * TODO: Handle changes to the property table.
+             */
+            if ( v->domain->arch.vgic.vits->propbase != 0 )
+            {
+                printk(XENLOG_G_WARNING
+                       "%pv: vGICR: Updating LPI propbase is not allowed\n", 
v);
+                vgic_unlock(v);
+                return 1;
+            }
+            vgic_reg64_write(&v->domain->arch.vgic.vits->propbase, *r, info);
+            vgic_unlock(v);
+            return vits_map_lpi_prop(v);
+        }
         goto write_ignore_64;
     case GICR_PENDBASER:
-        /* LPI is not implemented */
+        if ( dabt.size != DABT_DOUBLE_WORD ) goto bad_width;
+        if ( vgic_is_lpi_supported(v->domain) )
+        {
+            /* Just store the pendbaser value for later guest reads */
+            vgic_lock(v);
+            vgic_reg64_write(&v->arch.vgic.pendbase, *r, info);
+            vgic_unlock(v);
+            return 1;
+        }
         goto write_ignore_64;
     case GICR_INVLPIR:
         /* LPI is not implemented */
@@ -740,10 +1075,13 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
         typer = ((ncpus - 1) << GICD_TYPE_CPUS_SHIFT |
                  DIV_ROUND_UP(v->domain->arch.vgic.nr_spis, 32));
 
+        irq_bits = v->domain->arch.vgic.id_bits;
         typer |= (irq_bits - 1) << GICD_TYPE_ID_BITS_SHIFT;
 
-        *r = vgic_reg32_read(typer, info);
+        if ( vgic_is_lpi_supported(v->domain) )
+            typer |= GICD_TYPE_LPIS;
 
+        *r = vgic_reg32_read(typer, info);
         return 1;
     }
     case GICD_STATUSR:
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 269e4bb..b48a1d9 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -109,6 +109,7 @@ struct arch_domain
 #ifdef HAS_GICV3
         /* Virtual ITS */
         struct vgic_its *vits;
+        uint32_t gicr_ctlr;
         /* GIC V3 addressing */
         /* List of contiguous occupied by the redistributors */
         struct vgic_rdist_region {
@@ -251,6 +252,8 @@ struct arch_vcpu
 
         /* GICv3: redistributor base and flags for this vCPU */
         paddr_t rdist_base;
+        /* GICv3-ITS: LPI pending table for this vCPU */
+        paddr_t pendbase;
 #define VGIC_V3_RDIST_LAST  (1 << 0)        /* last vCPU of the rdist */
         uint8_t flags;
     } vgic;
diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h
index db1530e..11f1cd9 100644
--- a/xen/include/asm-arm/gic-its.h
+++ b/xen/include/asm-arm/gic-its.h
@@ -115,6 +115,7 @@
 
 #define LPI_PROP_ENABLED                (1 << 0)
 #define LPI_PROP_GROUP1                 (1 << 1)
+#define LPI_PRIORITY_MASK               (0xfc)
 
 /*
  * Collection structure - just an ID, and a redistributor address to
@@ -148,6 +149,14 @@ struct vgic_its
    unsigned long gits_size;
    /* GICR ctrl register */
    uint32_t ctrl;
+   /* LPI propbase */
+   paddr_t propbase;
+   /* Virtual LPI property table */
+   void *prop_page;
+   /* Virtual LPI property size */
+   uint32_t prop_size;
+   /* spinlock to protect lpi property table */
+   spinlock_t prop_lock;
    /* vITT device table ipa */
    paddr_t dt_ipa;
    /* vITT device table size */
diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h
index d950a1f..44f59a3 100644
--- a/xen/include/asm-arm/gic_v3_defs.h
+++ b/xen/include/asm-arm/gic_v3_defs.h
@@ -48,6 +48,7 @@
 /* Additional bits in GICD_TYPER defined by GICv3 */
 #define GICD_TYPE_ID_BITS_SHIFT      19
 #define GICD_TYPE_ID_BITS_MASK       0x1f
+#define GICD_TYPE_LPIS               (0x1UL << 17)
 
 #define GICD_TYPER_LPIS_SUPPORTED    (1U << 17)
 #define GICD_CTLR_RWP                (1UL << 31)
@@ -125,14 +126,17 @@
 #define GICR_PROPBASER_WaWb              (5U << 7)
 #define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
 #define GICR_PROPBASER_IDBITS_MASK       (0x1f)
+#define GICR_PROPBASER_PA_MASK           (0xfffffffff000UL)
 #define GICR_TYPER_PLPIS             (1U << 0)
 #define GICR_TYPER_VLPIS             (1U << 1)
 #define GICR_TYPER_LAST              (1U << 4)
+#define GICR_TYPER_PROCESSOR_SHIFT   (8)
 
 #define GICR_PENDBASER_InnerShareable    (1U << 10)
 #define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
 #define GICR_PENDBASER_nC                (1U << 7)
 #define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+#define GICR_PENDBASER_PTZ_MASK          (1UL << 62)
 
 #define DEFAULT_PMR_VALUE            0xff
 
-- 
1.7.9.5
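
For context, the GICR handlers above are written against the usual guest-side
LPI bring-up sequence: program GICR_PROPBASER with the physical address and ID
width of the LPI property table (which triggers vits_map_lpi_prop()), program
GICR_PENDBASER, then set GICR_CTLR.EnableLPIs. A rough sketch of that sequence
from the guest's point of view (illustrative only; the register offsets are
taken from the GICv3 architecture, writeq/writel are assumed Linux-style MMIO
accessors, and guest_lpi_init() itself is hypothetical):

/* Illustrative guest-side sequence, not part of this patch. */
#define GICR_CTLR              0x0000
#define GICR_PROPBASER         0x0070
#define GICR_PENDBASER         0x0078
#define GICR_CTLR_ENABLE_LPIS  (1U << 0)

static void guest_lpi_init(void __iomem *rdist, uint64_t prop_pa,
                           uint64_t pend_pa, unsigned int id_bits)
{
    /* PROPBASER: PA of the property table plus (IDbits - 1) in [4:0].
     * In this series the write lands in the GICR_PROPBASER case and
     * calls vits_map_lpi_prop(). */
    writeq(prop_pa | (id_bits - 1), rdist + GICR_PROPBASER);

    /* PENDBASER is only stored by the emulation for later reads. */
    writeq(pend_pa, rdist + GICR_PENDBASER);

    /* Finally enable LPIs; the GICR_CTLR write handler keeps only the
     * EnableLPIs bit. */
    writel(GICR_CTLR_ENABLE_LPIS, rdist + GICR_CTLR);
}

Direct injection via GICR_SETLPIR/GICR_CLRLPIR is deliberately not part of
this sequence, matching the GICR_CTLR comment in the patch.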

