[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] xen/arm: vgic-v3: Emulate correctly the re-distributor
commit acf65e5a25a791223a49884f803d82aeecc74606 Author: Julien Grall <julien.grall@xxxxxxxxxx> AuthorDate: Mon Feb 16 14:50:48 2015 +0000 Commit: Ian Campbell <ian.campbell@xxxxxxxxxx> CommitDate: Thu Feb 19 16:55:39 2015 +0000 xen/arm: vgic-v3: Emulate correctly the re-distributor There is a one-to-one mapping between re-distributors and processors. Each re-distributor can be accessed by any processor at any time. For instance, during the initialization of the GIC, the drivers will browse the re-distributors to find the one associated with the current processor (via GICR_TYPER). So each re-distributor has its own MMIO region. The current implementation of the vGICv3 emulation assumes that the re-distributor region is banked. Therefore, a processor won't be able to access the other re-distributors. While this works fine for Linux, where a processor will only access GICR_TYPER to find its associated re-distributor, we have to implement the re-distributor emulation correctly in order to boot other operating systems. All emulated registers of the re-distributors take a vCPU as parameter and take the necessary lock. Therefore concurrent access is already properly handled. The missing bit is retrieving the right vCPU for the region accessed. Retrieving the right vCPU could be slow, so it has been divided into 2 paths: - fast path: The current vCPU is accessing its own re-distributor - slow path: The current vCPU is accessing another re-distributor As the processor needs to initialize itself, the former case is very common. To handle the access quickly, the base address of the re-distributor is computed and stored per-vCPU during the vCPU initialization. The latter is less common and more complicated to handle. The re-distributors can be spread across multiple regions in memory. During domain creation, Xen will browse those regions to find the first vCPU handled by each region. 
When an access hits the slow path, Xen will: 1) Retrieve the region using the base address of the re-distributor accessed 2) Find the vCPU ID attached to the redistributor 3) Check the validity of the vCPU. If it's not valid, a data abort will be injected into the guest Finally, this patch also correctly supports the bit GICR_TYPER.LAST which indicates if the redistributor is the last one of the contiguous region. Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx> Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx> --- xen/arch/arm/gic-v3.c | 24 ++++++++++- xen/arch/arm/vgic-v3.c | 99 +++++++++++++++++++++++++++++++++++++++++- xen/include/asm-arm/domain.h | 6 +++ 3 files changed, 126 insertions(+), 3 deletions(-) diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c index fdfda0b..e7a7789 100644 --- a/xen/arch/arm/gic-v3.c +++ b/xen/arch/arm/gic-v3.c @@ -33,6 +33,7 @@ #include <xen/device_tree.h> #include <xen/sizes.h> #include <xen/libfdt/libfdt.h> +#include <xen/sort.h> #include <asm/p2m.h> #include <asm/domain.h> #include <asm/io.h> @@ -895,6 +896,8 @@ static int gicv_v3_init(struct domain *d) */ if ( is_hardware_domain(d) ) { + unsigned int first_cpu = 0; + d->arch.vgic.dbase = gicv3.dbase; d->arch.vgic.dbase_size = gicv3.dbase_size; @@ -909,8 +912,15 @@ static int gicv_v3_init(struct domain *d) for ( i = 0; i < gicv3.rdist_count; i++ ) { + paddr_t size = gicv3.rdist_regions[i].size; + d->arch.vgic.rdist_regions[i].base = gicv3.rdist_regions[i].base; - d->arch.vgic.rdist_regions[i].size = gicv3.rdist_regions[i].size; + d->arch.vgic.rdist_regions[i].size = size; + + /* Set the first CPU handled by this region */ + d->arch.vgic.rdist_regions[i].first_cpu = first_cpu; + + first_cpu += size / d->arch.vgic.rdist_stride; } d->arch.vgic.nr_regions = gicv3.rdist_count; } @@ -929,6 +939,7 @@ static int gicv_v3_init(struct domain *d) BUILD_BUG_ON((GUEST_GICV3_GICR0_SIZE / GUEST_GICV3_RDIST_STRIDE) < MAX_VIRT_CPUS); d->arch.vgic.rdist_regions[0].base = 
GUEST_GICV3_GICR0_BASE; d->arch.vgic.rdist_regions[0].size = GUEST_GICV3_GICR0_SIZE; + d->arch.vgic.rdist_regions[0].first_cpu = 0; } return 0; @@ -1173,6 +1184,14 @@ static const struct gic_hw_operations gicv3_ops = { .make_dt_node = gicv3_make_dt_node, }; +static int __init cmp_rdist(const void *a, const void *b) +{ + const struct rdist_region *l = a, *r = a; + + /* We assume that re-distributor regions can never overlap */ + return ( l->base < r->base) ? -1 : 0; +} + /* Set up the GIC */ static int __init gicv3_init(struct dt_device_node *node, const void *data) { @@ -1228,6 +1247,9 @@ static int __init gicv3_init(struct dt_device_node *node, const void *data) rdist_regs[i].size = rdist_size; } + /* The vGIC code requires the region to be sorted */ + sort(rdist_regs, gicv3.rdist_count, sizeof(*rdist_regs), cmp_rdist, NULL); + /* If stride is not set in dt. Set default to 2 * SZ_64K */ if ( !dt_property_read_u32(node, "redistributor-stride", &gicv3.rdist_stride) ) gicv3.rdist_stride = 0; diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c index 1d0e52d..97249db 100644 --- a/xen/arch/arm/vgic-v3.c +++ b/xen/arch/arm/vgic-v3.c @@ -114,6 +114,10 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info, MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 1) << 40 | MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 0) << 32); *r = aff; + + if ( v->arch.vgic.flags & VGIC_V3_RDIST_LAST ) + *r |= GICR_TYPER_LAST; + return 1; case GICR_STATUSR: /* Not implemented */ @@ -619,13 +623,56 @@ write_ignore: return 1; } +static inline struct vcpu *get_vcpu_from_rdist(paddr_t gpa, + struct vcpu *v, + uint32_t *offset) +{ + struct domain *d = v->domain; + uint32_t stride = d->arch.vgic.rdist_stride; + paddr_t base; + int i, vcpu_id; + struct vgic_rdist_region *region; + + *offset = gpa & (stride - 1); + base = gpa & ~((paddr_t)stride - 1); + + /* Fast path: the VCPU is trying to access its re-distributor */ + if ( likely(v->arch.vgic.rdist_base == base) ) + return v; + + /* Slow 
path: the VCPU is trying to access another re-distributor */ + + /* + * Find the region where the re-distributor lives. For this purpose, + * we look one region ahead as only MMIO range for redistributors + * traps here. + * Note: The region has been ordered during the GIC initialization + */ + for ( i = 1; i < d->arch.vgic.nr_regions; i++ ) + { + if ( base < d->arch.vgic.rdist_regions[i].base ) + break; + } + + region = &d->arch.vgic.rdist_regions[i - 1]; + + vcpu_id = region->first_cpu + ((base - region->base) / stride); + + if ( unlikely(vcpu_id >= d->max_vcpus) ) + return NULL; + + return d->vcpu[vcpu_id]; +} + static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info) { uint32_t offset; perfc_incr(vgicr_reads); - offset = info->gpa & (v->domain->arch.vgic.rdist_stride - 1); + v = get_vcpu_from_rdist(info->gpa, v, &offset); + if ( unlikely(!v) ) + return 0; if ( offset < SZ_64K ) return __vgic_v3_rdistr_rd_mmio_read(v, info, offset); @@ -645,7 +692,9 @@ static int vgic_v3_rdistr_mmio_write(struct vcpu *v, mmio_info_t *info) perfc_incr(vgicr_writes); - offset = info->gpa & (v->domain->arch.vgic.rdist_stride - 1); + v = get_vcpu_from_rdist(info->gpa, v, &offset); + if ( unlikely(!v) ) + return 0; if ( offset < SZ_64K ) return __vgic_v3_rdistr_rd_mmio_write(v, info, offset); @@ -1080,6 +1129,13 @@ static int vgic_v3_vcpu_init(struct vcpu *v) { int i; uint64_t affinity; + paddr_t rdist_base; + struct vgic_rdist_region *region; + unsigned int last_cpu; + + /* Convenient alias */ + struct domain *d = v->domain; + uint32_t rdist_stride = d->arch.vgic.rdist_stride; /* For SGI and PPI the target is always this CPU */ affinity = (MPIDR_AFFINITY_LEVEL(v->arch.vmpidr, 3) << 32 | @@ -1090,6 +1146,45 @@ static int vgic_v3_vcpu_init(struct vcpu *v) for ( i = 0 ; i < 32 ; i++ ) v->arch.vgic.private_irqs->v3.irouter[i] = affinity; + /* + * Find the region where the re-distributor lives. 
For this purpose, + * we look one region ahead as we have only the first CPU in hand. + */ + for ( i = 1; i < d->arch.vgic.nr_regions; i++ ) + { + if ( v->vcpu_id < d->arch.vgic.rdist_regions[i].first_cpu ) + break; + } + + region = &d->arch.vgic.rdist_regions[i - 1]; + + /* Get the base address of the redistributor */ + rdist_base = region->base; + rdist_base += (v->vcpu_id - region->first_cpu) * rdist_stride; + + /* Check if a valid region was found for the re-distributor */ + if ( (rdist_base < region->base) || + ((rdist_base + rdist_stride) > (region->base + region->size)) ) + { + dprintk(XENLOG_ERR, + "d%u: Unable to find a re-distributor for VCPU %u\n", + d->domain_id, v->vcpu_id); + return -EINVAL; + } + + v->arch.vgic.rdist_base = rdist_base; + + /* + * If the redistributor is the last one of the + * contiguous region of the vCPU is the last of the domain, set + * VGIC_V3_RDIST_LAST flags. + * Note that we are assuming max_vcpus will never change. + */ + last_cpu = (region->size / rdist_stride) + region->first_cpu - 1; + + if ( v->vcpu_id == last_cpu || (v->vcpu_id == (d->max_vcpus - 1)) ) + v->arch.vgic.flags |= VGIC_V3_RDIST_LAST; + return 0; } diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h index 3eaa7f0..81e3185 100644 --- a/xen/include/asm-arm/domain.h +++ b/xen/include/asm-arm/domain.h @@ -106,6 +106,7 @@ struct arch_domain struct vgic_rdist_region { paddr_t base; /* Base address */ paddr_t size; /* Size */ + unsigned int first_cpu; /* First CPU handled */ } rdist_regions[MAX_RDIST_COUNT]; int nr_regions; /* Number of rdist regions */ uint32_t rdist_stride; /* Re-Distributor stride */ @@ -239,6 +240,11 @@ struct arch_vcpu * lr_pending is a subset of vgic.inflight_irqs. 
*/ struct list_head lr_pending; spinlock_t lock; + + /* GICv3: redistributor base and flags for this vCPU */ + paddr_t rdist_base; +#define VGIC_V3_RDIST_LAST (1 << 0) /* last vCPU of the rdist */ + uint8_t flags; } vgic; /* Timer registers */ -- generated by git-patchbot for /home/xen/git/xen.git#master _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |