[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [RFC PATCH 11/24] ARM: vGICv3: handle virtual LPI pending and property tables
On Wed, 28 Sep 2016, Andre Przywara wrote: > Allow a guest to provide the address and size for the memory regions > it has reserved for the GICv3 pending and property tables. > We sanitise the various fields of the respective redistributor > registers and map those pages into Xen's address space to have easy > access. > > Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx> > --- > xen/arch/arm/vgic-v3.c | 189 > ++++++++++++++++++++++++++++++++++++++---- > xen/arch/arm/vgic.c | 4 + > xen/include/asm-arm/domain.h | 7 +- > xen/include/asm-arm/gic-its.h | 10 ++- > xen/include/asm-arm/vgic.h | 3 + > 5 files changed, 197 insertions(+), 16 deletions(-) > > diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c > index e9b6490..8fe8386 100644 > --- a/xen/arch/arm/vgic-v3.c > +++ b/xen/arch/arm/vgic-v3.c > @@ -20,12 +20,14 @@ > > #include <xen/bitops.h> > #include <xen/config.h> > +#include <xen/domain_page.h> > #include <xen/lib.h> > #include <xen/init.h> > #include <xen/softirq.h> > #include <xen/irq.h> > #include <xen/sched.h> > #include <xen/sizes.h> > +#include <xen/vmap.h> > #include <asm/current.h> > #include <asm/mmio.h> > #include <asm/gic_v3_defs.h> > @@ -228,12 +230,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu > *v, mmio_info_t *info, > goto read_reserved; > > case VREG64(GICR_PROPBASER): > - /* LPI's not implemented */ > - goto read_as_zero_64; > + if ( !vgic_reg64_check_access(dabt) ) goto bad_width; > + *r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info); > + return 1; > > case VREG64(GICR_PENDBASER): > - /* LPI's not implemented */ > - goto read_as_zero_64; > + if ( !vgic_reg64_check_access(dabt) ) goto bad_width; > + *r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info); > + return 1; > > case 0x0080: > goto read_reserved; > @@ -301,11 +305,6 @@ bad_width: > domain_crash_synchronous(); > return 0; > > -read_as_zero_64: > - if ( !vgic_reg64_check_access(dabt) ) goto bad_width; > - *r = 0; > - return 1; > - > 
read_as_zero_32: > if ( dabt.size != DABT_WORD ) goto bad_width; > *r = 0; > @@ -330,11 +329,149 @@ read_unknown: > return 1; > } > > +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask, > + int field_shift, > + uint64_t (*sanitise_fn)(uint64_t)) > +{ > + uint64_t field = (reg & field_mask) >> field_shift; > + > + field = sanitise_fn(field) << field_shift; > + return (reg & ~field_mask) | field; > +} > + > +/* We want to avoid outer shareable. */ > +static uint64_t vgic_sanitise_shareability(uint64_t field) > +{ > + switch (field) { > + case GIC_BASER_OuterShareable: > + return GIC_BASER_InnerShareable; > + default: > + return field; > + } > +} > + > +/* Avoid any inner non-cacheable mapping. */ > +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field) > +{ > + switch (field) { > + case GIC_BASER_CACHE_nCnB: > + case GIC_BASER_CACHE_nC: > + return GIC_BASER_CACHE_RaWb; > + default: > + return field; > + } > +} > + > +/* Non-cacheable or same-as-inner are OK. 
*/ > +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field) > +{ > + switch (field) { > + case GIC_BASER_CACHE_SameAsInner: > + case GIC_BASER_CACHE_nC: > + return field; > + default: > + return GIC_BASER_CACHE_nC; > + } > +} > + > +static uint64_t sanitize_propbaser(uint64_t reg) > +{ > + reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK, > + GICR_PROPBASER_SHAREABILITY_SHIFT, > + vgic_sanitise_shareability); > + reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK, > + GICR_PROPBASER_INNER_CACHEABILITY_SHIFT, > + vgic_sanitise_inner_cacheability); > + reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK, > + GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT, > + vgic_sanitise_outer_cacheability); > + > + reg &= ~PROPBASER_RES0_MASK; > + reg &= ~GENMASK(51, 48); > + return reg; > +} > + > +static uint64_t sanitize_pendbaser(uint64_t reg) > +{ > + reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK, > + GICR_PENDBASER_SHAREABILITY_SHIFT, > + vgic_sanitise_shareability); > + reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK, > + GICR_PENDBASER_INNER_CACHEABILITY_SHIFT, > + vgic_sanitise_inner_cacheability); > + reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK, > + GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT, > + vgic_sanitise_outer_cacheability); > + > + reg &= ~PENDBASER_RES0_MASK; > + reg &= ~GENMASK(51, 48); > + return reg; > +} > + > +/* > + * Allow mapping some parts of guest memory into Xen's VA space to have easy > + * access to it. This is to allow ITS configuration data to be held in > + * guest memory and avoid using Xen memory for that. > + */ > +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages) map_guest_pages and unmap_guest_pages are not ARM specific and should live somewhere in xen/common, maybe xen/common/vmap.c. 
> +{ > + mfn_t onepage; > + mfn_t *pages; > + int i; > + void *ptr; > + > + /* TODO: free previous mapping, change prototype? use get-put-put? */ No need: the caller is already doing that, right? > + guest_addr &= PAGE_MASK; > + > + if ( nr_pages == 1 ) > + { > + pages = &onepage; > + } else > + { > + pages = xmalloc_array(mfn_t, nr_pages); > + if ( !pages ) > + return NULL; > + } How often do you think only 1 page will be used? If the answer is not often, I would get rid of the onepage optimization. > + for (i = 0; i < nr_pages; i++) > + { > + get_page_from_gfn(d, (guest_addr >> PAGE_SHIFT) + i, NULL, > P2M_ALLOC); > + pages[i] = _mfn((guest_addr + i * PAGE_SIZE) >> PAGE_SHIFT); Don't you need to call page_to_mfn (or gfn_to_mfn) to get the mfn? > + } > + > + ptr = vmap(pages, nr_pages); It is possible for vmap to fail and return NULL. Maybe because the guest intentionally tried to break the hypervisor by passing an array of pages that ends in an MMIO region. We need to check vmap errors and handle them. 
> + if ( nr_pages > 1 ) > + xfree(pages); > + > + return ptr; > +} > + > +void unmap_guest_pages(void *va, int nr_pages) > +{ > + paddr_t pa; > + unsigned long i; > + > + if ( !va ) > + return; > + > + va = (void *)((uintptr_t)va & PAGE_MASK); > + pa = virt_to_maddr(va); > + > + vunmap(va); > + for (i = 0; i < nr_pages; i++) > + put_page(mfn_to_page((pa >> PAGE_SHIFT) + i)); > + > + return; > +} > + > static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info, > uint32_t gicr_reg, > register_t r) > { > struct hsr_dabt dabt = info->dabt; > + uint64_t reg; > > switch ( gicr_reg ) > { > @@ -375,13 +512,37 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu > *v, mmio_info_t *info, > case 0x0050: > goto write_reserved; > > - case VREG64(GICR_PROPBASER): > - /* LPI is not implemented */ > - goto write_ignore_64; > + case VREG64(GICR_PROPBASER): { > + int nr_pages; unsigned int > + if ( info->dabt.size < DABT_WORD ) goto bad_width; Why not use vgic_reg64_check_access? > + if ( v->arch.vgic.flags & VGIC_V3_LPIS_ENABLED ) > + return 1; Why? If there is a good reason for this, it is probably worth writing it in an in-code comment. > + reg = v->domain->arch.vgic.rdist_propbase; > + vgic_reg64_update(®, r, info); > + reg = sanitize_propbaser(reg); > + v->domain->arch.vgic.rdist_propbase = reg; > > + nr_pages = BIT((v->domain->arch.vgic.rdist_propbase & 0x1f) + 1) - > 8192; > + nr_pages = DIV_ROUND_UP(nr_pages, PAGE_SIZE); Do we need to set an upper limit on nr_pages? We don't really want to allow (2^0x1f)/4096 pages, right? > + unmap_guest_pages(v->domain->arch.vgic.proptable, nr_pages); > + v->domain->arch.vgic.proptable = map_guest_pages(v->domain, > + reg & GENMASK(47, > 12), > + nr_pages); I am pretty sure I am reading the right spec now and it should be GENMASK(51, 12). Also, don't we need to sanitize the table too before using it? 
> + return 1; > + } > case VREG64(GICR_PENDBASER): > - /* LPI is not implemented */ > - goto write_ignore_64; > + if ( info->dabt.size < DABT_WORD ) goto bad_width; Why not use vgic_reg64_check_access? > + reg = v->arch.vgic.rdist_pendbase; > + vgic_reg64_update(®, r, info); > + reg = sanitize_pendbaser(reg); > + v->arch.vgic.rdist_pendbase = reg; > + unmap_guest_pages(v->arch.vgic.pendtable, 16); > + v->arch.vgic.pendtable = map_guest_pages(v->domain, > + reg & GENMASK(47, 12), 16); Indentation. We need to make sure the vmap mapping is correct. Also we need to sanitize this table too before using it. > + return 1; > > case 0x0080: > goto write_reserved; > diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c > index b961551..4d9304f 100644 > --- a/xen/arch/arm/vgic.c > +++ b/xen/arch/arm/vgic.c > @@ -488,6 +488,10 @@ struct pending_irq *lpi_to_pending(struct vcpu *v, > unsigned int lpi, > empty->pirq.irq = lpi; > } > > + /* Update the enabled status */ > + if ( gicv3_lpi_is_enabled(v->domain, lpi) ) > + set_bit(GIC_IRQ_GUEST_ENABLED, &empty->pirq.status); Where is the GIC_IRQ_GUEST_ENABLED unset? > return &empty->pirq; > } > > diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h > index ae8a9de..0cd3500 100644 > --- a/xen/include/asm-arm/domain.h > +++ b/xen/include/asm-arm/domain.h > @@ -109,6 +109,8 @@ struct arch_domain > } *rdist_regions; > int nr_regions; /* Number of rdist regions */ > uint32_t rdist_stride; /* Re-Distributor stride */ > + uint64_t rdist_propbase; > + uint8_t *proptable; Do we need to keep both rdist_propbase and proptable? It is easy to go from proptable to rdist_propbase and I guess it is not an operation that is done often? If so, we could save some memory and remove it. 
> #endif > } vgic; > > @@ -247,7 +249,10 @@ struct arch_vcpu > > /* GICv3: redistributor base and flags for this vCPU */ > paddr_t rdist_base; > -#define VGIC_V3_RDIST_LAST (1 << 0) /* last vCPU of the rdist */ > +#define VGIC_V3_RDIST_LAST (1 << 0) /* last vCPU of the rdist */ > +#define VGIC_V3_LPIS_ENABLED (1 << 1) > + uint64_t rdist_pendbase; > + unsigned long *pendtable; Same here. > uint8_t flags; > struct list_head pending_lpi_list; > } vgic; > diff --git a/xen/include/asm-arm/gic-its.h b/xen/include/asm-arm/gic-its.h > index 1f881c0..3b2e5c0 100644 > --- a/xen/include/asm-arm/gic-its.h > +++ b/xen/include/asm-arm/gic-its.h > @@ -139,7 +139,11 @@ int gicv3_lpi_drop_host_lpi(struct host_its *its, > > static inline int gicv3_lpi_get_priority(struct domain *d, uint32_t lpi) > { > - return GIC_PRI_IRQ; > + return d->arch.vgic.proptable[lpi - 8192] & 0xfc; Please #define 0xfc. Do we need to check for lpi overflows? As in lpi numbers larger than proptable size? > +} > +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi) > +{ > + return d->arch.vgic.proptable[lpi - 8192] & LPI_PROP_ENABLED; > } > > #else > @@ -185,6 +189,10 @@ static inline int gicv3_lpi_get_priority(struct domain > *d, uint32_t lpi) > { > return GIC_PRI_IRQ; > } > +static inline bool gicv3_lpi_is_enabled(struct domain *d, uint32_t lpi) > +{ > + return false; > +} > > #endif /* CONFIG_HAS_ITS */ > > diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h > index 4e29ba6..2b216cc 100644 > --- a/xen/include/asm-arm/vgic.h > +++ b/xen/include/asm-arm/vgic.h > @@ -285,6 +285,9 @@ VGIC_REG_HELPERS(32, 0x3); > > #undef VGIC_REG_HELPERS > > +void *map_guest_pages(struct domain *d, paddr_t guest_addr, int nr_pages); > +void unmap_guest_pages(void *va, int nr_pages); > + > enum gic_sgi_mode; > > /* _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.