[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v4 6/6] arm: implement event injection
Implement vcpu_mark_events_pending using the vgic to inject PPI 31, that we reserve for Xen usage. In the future the interrupt used for event injection might be dynamic and could be written into the device tree. Otherwise it could be an SGI chosen by the guest and passed to Xen through a hypercall. Considering that: - it is easy to determine if an event notification interrupt has already been EOI'd by the guest just by looking at the evtchn_upcall_pending bit in the shared_info page; - we can safely assume that there is at most one event notification interrupt pending at any time in any set of LR registers because we never inject more than a single event notification interrupt in one vcpu (see vcpu_mark_events_pending); we can avoid requesting maintenance interrupts for VGIC_IRQ_EVTCHN_CALLBACK, provided that we check for event notification interrupts that need to be cleared in the following places: - maintenance interrupt entry; - gic_set_guest_irq; that is, every time we are about to write to an LR.
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx> --- xen/arch/arm/domain.c | 11 +++++++++++ xen/arch/arm/dummy.S | 1 - xen/arch/arm/gic.c | 40 +++++++++++++++++++++++++++++++++++++++- xen/arch/arm/gic.h | 3 +++ 4 files changed, 53 insertions(+), 2 deletions(-) diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index 3a726c8..5702399 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -232,6 +232,17 @@ void arch_dump_vcpu_info(struct vcpu *v) { } +void vcpu_mark_events_pending(struct vcpu *v) +{ + int already_pending = test_and_set_bit( + 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending)); + + if ( already_pending ) + return; + + vgic_vcpu_inject_irq(v, VGIC_IRQ_EVTCHN_CALLBACK, 1); +} + /* * Local variables: * mode: C diff --git a/xen/arch/arm/dummy.S b/xen/arch/arm/dummy.S index 8c6151c..016340c 100644 --- a/xen/arch/arm/dummy.S +++ b/xen/arch/arm/dummy.S @@ -27,7 +27,6 @@ DUMMY(arch_vcpu_reset); DUMMY(free_vcpu_guest_context); DUMMY(sync_vcpu_execstate); NOP(update_vcpu_system_time); -DUMMY(vcpu_mark_events_pending); DUMMY(vcpu_show_execution_state); /* Page Reference & Type Maintenance */ diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c index cdb4e4a..cc9d37b 100644 --- a/xen/arch/arm/gic.c +++ b/xen/arch/arm/gic.c @@ -37,6 +37,7 @@ + (GIC_CR_OFFSET & 0xfff))) #define GICH ((volatile uint32_t *) (FIXMAP_ADDR(FIXMAP_GICH) \ + (GIC_HR_OFFSET & 0xfff))) +static void events_maintenance(struct vcpu *v); /* Global state */ static struct { @@ -46,6 +47,7 @@ static struct { unsigned int lines; unsigned int cpus; spinlock_t lock; + uint64_t event_mask; uint64_t lr_mask; /* lr_pending is used to queue IRQs (struct pending_irq) that the * vgic tried to inject in the guest (calling gic_set_guest_irq) but @@ -293,6 +295,7 @@ int __init gic_init(void) gic_hyp_init(); gic.lr_mask = 0ULL; + gic.event_mask = 0ULL; INIT_LIST_HEAD(&gic.lr_pending); spin_unlock(&gic.lock); @@ -392,9 +395,15 @@ int __init setup_irq(unsigned int 
irq, struct irqaction *new) static inline void gic_set_lr(int lr, unsigned int virtual_irq, unsigned int state, unsigned int priority) { + int maintenance_int = GICH_LR_MAINTENANCE_IRQ; + BUG_ON(lr > nr_lrs); + + if (virtual_irq == VGIC_IRQ_EVTCHN_CALLBACK && nr_lrs > 1) + maintenance_int = 0; + GICH[GICH_LR + lr] = state | - GICH_LR_MAINTENANCE_IRQ | + maintenance_int | ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) | ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT); } @@ -405,6 +414,8 @@ void gic_set_guest_irq(unsigned int virtual_irq, int i; struct pending_irq *iter, *n; + events_maintenance(current); + spin_lock(&gic.lock); if ( list_empty(&gic.lr_pending) ) @@ -412,6 +423,8 @@ void gic_set_guest_irq(unsigned int virtual_irq, i = find_first_zero_bit(&gic.lr_mask, nr_lrs); if (i < nr_lrs) { set_bit(i, &gic.lr_mask); + if ( virtual_irq == VGIC_IRQ_EVTCHN_CALLBACK ) + set_bit(i, &gic.event_mask); gic_set_lr(i, virtual_irq, state, priority); goto out; } @@ -515,12 +528,35 @@ void gicv_setup(struct domain *d) GIC_BASE_ADDRESS + GIC_VR_OFFSET); } +static void events_maintenance(struct vcpu *v) +{ + int i = 0; + int already_pending = test_bit(0, + (unsigned long *)&vcpu_info(v, evtchn_upcall_pending)); + + if (!already_pending && gic.event_mask != 0) { + spin_lock(&gic.lock); + while ((i = find_next_bit((const long unsigned int *) &gic.event_mask, + sizeof(uint64_t), i)) < sizeof(uint64_t)) { + + GICH[GICH_LR + i] = 0; + clear_bit(i, &gic.lr_mask); + clear_bit(i, &gic.event_mask); + + i++; + } + spin_unlock(&gic.lock); + } +} + static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) { int i = 0, virq; uint32_t lr; uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32); + events_maintenance(current); + while ((i = find_next_bit((const long unsigned int *) &eisr, sizeof(eisr), i)) < sizeof(eisr)) { struct pending_irq *p; @@ -536,6 +572,8 @@ static void maintenance_interrupt(int irq, void *dev_id, struct 
cpu_user_regs *r gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority); list_del_init(&p->lr_queue); set_bit(i, &gic.lr_mask); + if ( p->irq == VGIC_IRQ_EVTCHN_CALLBACK ) + set_bit(i, &gic.event_mask); } else { gic_inject_irq_stop(); } diff --git a/xen/arch/arm/gic.h b/xen/arch/arm/gic.h index 2c5922e..ff8d0a2 100644 --- a/xen/arch/arm/gic.h +++ b/xen/arch/arm/gic.h @@ -121,6 +121,9 @@ #define GICH_LR_CPUID_SHIFT 9 #define GICH_VTR_NRLRGS 0x3f +/* XXX: write this into the DT */ +#define VGIC_IRQ_EVTCHN_CALLBACK 31 + extern int domain_vgic_init(struct domain *d); extern int vcpu_vgic_init(struct vcpu *v); extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq,int virtual); -- 1.7.2.5 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |