Re: [Xen-devel] [RFC PATCH v2 12/26] ARM: vGICv3: handle virtual LPI pending and property tables
On Thu, 22 Dec 2016, Andre Przywara wrote:
> Allow a guest to provide the address and size for the memory regions
> it has reserved for the GICv3 pending and property tables.
> We sanitise the various fields of the respective redistributor
> registers and map those pages into Xen's address space to have easy
> access.

Many comments still unaddressed

> Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
> ---
>  xen/arch/arm/vgic-v3.c       | 202 ++++++++++++++++++++++++++++++++++++++++---
>  xen/arch/arm/vgic.c          |   4 +
>  xen/include/asm-arm/domain.h |   8 +-
>  xen/include/asm-arm/vgic.h   |   4 +
>  4 files changed, 203 insertions(+), 15 deletions(-)
> 
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index 0ffde74..b981d4e 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -20,12 +20,14 @@
>  
>  #include <xen/bitops.h>
>  #include <xen/config.h>
> +#include <xen/domain_page.h>
>  #include <xen/lib.h>
>  #include <xen/init.h>
>  #include <xen/softirq.h>
>  #include <xen/irq.h>
>  #include <xen/sched.h>
>  #include <xen/sizes.h>
> +#include <xen/vmap.h>
>  #include <asm/current.h>
>  #include <asm/mmio.h>
>  #include <asm/gic_v3_defs.h>
> @@ -229,12 +231,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
>          goto read_reserved;
>  
>      case VREG64(GICR_PROPBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
> +        return 1;
>  
>      case VREG64(GICR_PENDBASER):
> -        /* LPI's not implemented */
> -        goto read_as_zero_64;
> +        if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +        *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
> +        return 1;
>  
>      case 0x0080:
>          goto read_reserved;
> @@ -302,11 +306,6 @@ bad_width:
>      domain_crash_synchronous();
>      return 0;
>  
> -read_as_zero_64:
> -    if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> -    *r = 0;
> -    return 1;
> -
>  read_as_zero_32:
>      if ( dabt.size != DABT_WORD ) goto bad_width;
>      *r = 0;
> @@ -331,6 +330,143 @@ read_unknown:
>      return 1;
>  }
>  
> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
> +                                    int field_shift,
> +                                    uint64_t (*sanitise_fn)(uint64_t))
> +{
> +    uint64_t field = (reg & field_mask) >> field_shift;
> +
> +    field = sanitise_fn(field) << field_shift;
> +    return (reg & ~field_mask) | field;
> +}
> +
> +/* We want to avoid outer shareable. */
> +static uint64_t vgic_sanitise_shareability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_OuterShareable:
> +        return GIC_BASER_InnerShareable;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Avoid any inner non-cacheable mapping. */
> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_nCnB:
> +    case GIC_BASER_CACHE_nC:
> +        return GIC_BASER_CACHE_RaWb;
> +    default:
> +        return field;
> +    }
> +}
> +
> +/* Non-cacheable or same-as-inner are OK. */
> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
> +{
> +    switch (field) {
> +    case GIC_BASER_CACHE_SameAsInner:
> +    case GIC_BASER_CACHE_nC:
> +        return field;
> +    default:
> +        return GIC_BASER_CACHE_nC;
> +    }
> +}
> +
> +static uint64_t sanitize_propbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
> +                              GICR_PROPBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PROPBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);
> +    return reg;
> +}
> +
> +static uint64_t sanitize_pendbaser(uint64_t reg)
> +{
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
> +                              GICR_PENDBASER_SHAREABILITY_SHIFT,
> +                              vgic_sanitise_shareability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_inner_cacheability);
> +    reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
> +                              GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
> +                              vgic_sanitise_outer_cacheability);
> +
> +    reg &= ~PENDBASER_RES0_MASK;
> +    reg &= ~GENMASK(51, 48);
> +    return reg;
> +}
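
(Aside for anyone following the sanitising scheme above: the mask/shift/callback
composition can be exercised on its own. The snippet below is only a self-contained
illustration of the pattern; the masks and encodings in it are made up for the
example and are not the GIC_BASER_*/GICR_PROPBASER_* definitions from the Xen headers.)

/* Standalone sketch of the sanitise-field pattern; all values hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define SHAREABILITY_SHIFT  10
#define SHAREABILITY_MASK   (0x3ULL << SHAREABILITY_SHIFT)
#define SH_INNER_SHAREABLE  0x1ULL
#define SH_OUTER_SHAREABLE  0x2ULL

static uint64_t sanitise_field(uint64_t reg, uint64_t mask, int shift,
                               uint64_t (*fn)(uint64_t))
{
    uint64_t field = (reg & mask) >> shift;

    field = fn(field) << shift;
    return (reg & ~mask) | field;       /* bits outside the field are preserved */
}

static uint64_t sanitise_shareability(uint64_t field)
{
    /* Downgrade outer shareable to inner shareable, keep everything else. */
    return (field == SH_OUTER_SHAREABLE) ? SH_INNER_SHAREABLE : field;
}

int main(void)
{
    uint64_t reg = 0xabcd0000ULL | (SH_OUTER_SHAREABLE << SHAREABILITY_SHIFT);

    printf("before: 0x%llx\n", (unsigned long long)reg);
    reg = sanitise_field(reg, SHAREABILITY_MASK, SHAREABILITY_SHIFT,
                         sanitise_shareability);
    printf("after:  0x%llx\n", (unsigned long long)reg);
    return 0;
}
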
> +
> +/*
> + * Allow mapping some parts of guest memory into Xen's VA space to have easy
> + * access to it. This is to allow ITS configuration data to be held in
> + * guest memory and avoid using Xen memory for that.
> + */
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages)
> +{
> +    mfn_t onepage;
> +    mfn_t *pages;
> +    int i;
> +    void *ptr;
> +
> +    /* TODO: free previous mapping, change prototype? use get-put-put? */
> +
> +    guest_addr &= PAGE_MASK;
> +
> +    if ( nr_pages == 1 )
> +    {
> +        pages = &onepage;
> +    } else
> +    {
> +        pages = xmalloc_array(mfn_t, nr_pages);
> +        if ( !pages )
> +            return NULL;
> +    }
> +
> +    for ( i = 0; i < nr_pages; i++ )
> +    {
> +        get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, NULL, P2M_ALLOC);
> +        pages[i] = _mfn((guest_addr + i * PAGE_SIZE) >> PAGE_SHIFT);
> +    }
> +
> +    ptr = vmap(pages, nr_pages);
> +
> +    if ( nr_pages > 1 )
> +        xfree(pages);
> +
> +    return ptr;
> +}
> +
> +void unmap_guest_pages(void *va, int nr_pages)
> +{
> +    paddr_t pa;
> +    unsigned long i;
> +
> +    if ( !va )
> +        return;
> +
> +    va = (void *)((uintptr_t)va & PAGE_MASK);
> +    pa = virt_to_maddr(va);
> +
> +    vunmap(va);
> +    for ( i = 0; i < nr_pages; i++ )
> +        put_page(mfn_to_page((pa >> PAGE_SHIFT) + i));
> +
> +    return;
> +}
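
(For readers skimming: the two helpers above are meant to be used as a pair, roughly
as in the condensed, hypothetical caller sketched below. remap_guest_table(), its
parameters and the error handling are illustrative only and assume the usual Xen
definitions of DIV_ROUND_UP/PAGE_SIZE; the real callers are the GICR_PROPBASER and
GICR_PENDBASER write handlers further down.)

/*
 * Hypothetical caller of map_guest_pages()/unmap_guest_pages():
 * tear down any previous mapping, then map the region the guest described.
 */
static int remap_guest_table(struct domain *d, void **table, paddr_t gpa,
                             unsigned int bytes, int old_nr_pages)
{
    int nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);

    if ( *table )
        unmap_guest_pages(*table, old_nr_pages);

    *table = map_guest_pages(d, gpa, nr_pages);

    return *table ? 0 : -ENOMEM;
}
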
> +
>  int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi)
>  {
>      if ( vlpi >= d->arch.vgic.nr_lpis )
> @@ -339,11 +475,20 @@ int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi)
>      return d->arch.vgic.proptable[vlpi - 8192] & 0xfc;
>  }
>  
> +bool vgic_lpi_is_enabled(struct domain *d, uint32_t vlpi)
> +{
> +    if ( vlpi >= d->arch.vgic.nr_lpis )
> +        return false;
> +
> +    return d->arch.vgic.proptable[vlpi - 8192] & LPI_PROP_ENABLED;
> +}
> +
>  static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>                                            uint32_t gicr_reg,
>                                            register_t r)
>  {
>      struct hsr_dabt dabt = info->dabt;
> +    uint64_t reg;
>  
>      switch ( gicr_reg )
>      {
> @@ -384,13 +529,42 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
>      case 0x0050:
>          goto write_reserved;
>  
> -    case VREG64(GICR_PROPBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +    case VREG64(GICR_PROPBASER): {
> +        int nr_pages;
> +
> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;
> +        if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED )
> +            return 1;
> +
> +        reg = v->domain->arch.vgic.rdist_propbase;
> +        if ( v->domain->arch.vgic.proptable )
> +        {
> +            nr_pages = BIT((reg & 0x1f) + 1) - 8192;
> +            nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE);
> +            unmap_guest_pages(v->domain->arch.vgic.proptable, nr_pages);
> +        }
> +        vgic_reg64_update(&reg, r, info);
> +        reg = sanitize_propbaser(reg);
> +        v->domain->arch.vgic.rdist_propbase = reg;
> +
> +        v->domain->arch.vgic.nr_lpis = BIT((reg & 0x1f) + 1) - 8192;
> +        nr_pages = DIV_ROUND_UP(v->domain->arch.vgic.nr_lpis, PAGE_SIZE);
> +        v->domain->arch.vgic.proptable = map_guest_pages(v->domain,
> +                                                         reg & GENMASK(47, 12),
> +                                                         nr_pages);
> +        return 1;
> +    }
>      case VREG64(GICR_PENDBASER):
> -        /* LPI is not implemented */
> -        goto write_ignore_64;
> +        if ( info->dabt.size < DABT_WORD ) goto bad_width;
> +        reg = v->arch.vgic.rdist_pendbase;
> +        vgic_reg64_update(&reg, r, info);
> +        reg = sanitize_pendbaser(reg);
> +        v->arch.vgic.rdist_pendbase = reg;
> +
> +        unmap_guest_pages(v->arch.vgic.pendtable, 16);
> +        v->arch.vgic.pendtable = map_guest_pages(v->domain,
> +                                                 reg & GENMASK(47, 12), 16);
> +        return 1;
>  
>      case 0x0080:
>          goto write_reserved;
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index f15eb3e..2a33a0e 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -497,6 +497,10 @@ struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
>          empty->pirq.irq = lpi;
>      }
>  
> +    /* Update the enabled status */
> +    if ( vgic_lpi_is_enabled(v->domain, lpi) )
> +        set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status);
> +
>      spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
>  
>      return &empty->pirq;
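
(A quick worked example of the sizing maths in the GICR_PROPBASER write path above,
which may help when reviewing the DIV_ROUND_UP logic: the low five bits of
GICR_PROPBASER encode the number of supported interrupt ID bits minus one, so a guest
writing 15 there asks for 2^16 = 65536 interrupt IDs; the first 8192 IDs are not LPIs,
leaving nr_lpis = 57344, and with one property byte per LPI that is 57344 bytes, i.e.
14 pages of 4K. The pending table, by contrast, is mapped with a hard-coded 16 pages
here. The snippet below just replays that arithmetic outside of Xen; the local
PAGE_SIZE/DIV_ROUND_UP definitions are stand-ins for the Xen ones.)

/* Worked example of the PROPBASER sizing maths; purely illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE           4096
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    uint64_t idbits_field = 15;                            /* guest wrote IDbits-1 = 15 */
    uint64_t nr_ids   = 1ULL << (idbits_field + 1);        /* 65536 interrupt IDs */
    uint64_t nr_lpis  = nr_ids - 8192;                     /* IDs below 8192 are not LPIs */
    uint64_t nr_pages = DIV_ROUND_UP(nr_lpis, PAGE_SIZE);  /* one property byte per LPI */

    printf("nr_lpis=%llu property-table pages=%llu\n",
           (unsigned long long)nr_lpis, (unsigned long long)nr_pages);
    return 0;
}
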
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 02ae2cd..543d058 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -109,6 +109,9 @@ struct arch_domain
>          } *rdist_regions;
>          int nr_regions;                     /* Number of rdist regions */
>          uint32_t rdist_stride;              /* Re-Distributor stride */
> +        int nr_lpis;
> +        uint64_t rdist_propbase;
> +        uint8_t *proptable;
>          struct list_head its_devices;       /* devices mapped to an ITS */
>          spinlock_t its_devices_lock;        /* protects the its_devices list */
>  #endif
> @@ -254,7 +257,10 @@ struct arch_vcpu
>  
>      /* GICv3: redistributor base and flags for this vCPU */
>      paddr_t rdist_base;
> -#define VGIC_V3_RDIST_LAST      (1 << 0)    /* last vCPU of the rdist */
> +#define VGIC_V3_RDIST_LAST      (1 << 0)    /* last vCPU of the rdist */
> +#define VGIC_V3_LPIS_ENABLED    (1 << 1)
> +    uint64_t rdist_pendbase;
> +    unsigned long *pendtable;
>      uint8_t flags;
>      struct list_head pending_lpi_list;
>      spinlock_t pending_lpi_list_lock;       /* protects the pending_lpi_list */
> diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
> index a503321..1c157d3 100644
> --- a/xen/include/asm-arm/vgic.h
> +++ b/xen/include/asm-arm/vgic.h
> @@ -285,6 +285,9 @@ VGIC_REG_HELPERS(32, 0x3);
>  
>  #undef VGIC_REG_HELPERS
>  
> +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages);
> +void unmap_guest_pages(void *va, int nr_pages);
> +
>  enum gic_sgi_mode;
>  
>  /*
> @@ -312,6 +315,7 @@ extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq);
>  extern bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr);
>  extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
>  extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
> +extern bool vgic_lpi_is_enabled(struct domain *d, uint32_t vlpi);
>  extern int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi);
>  extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);
>  int vgic_v2_init(struct domain *d, int *mmio_count);
> -- 
> 2.9.0
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> https://lists.xen.org/xen-devel