[Xen-changelog] VIRQs and IPIs on VCPU#0 are automatically re-bound on save/restore.
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 805ee053e61f4c25e642ff3bb9e657966a15bc33
# Parent  c317e0aca9f12b086bdfe1f442a7c2221605a2bd
VIRQs and IPIs on VCPU#0 are automatically re-bound on save/restore.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r c317e0aca9f1 -r 805ee053e61f linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Thu Sep 29 10:10:27 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Thu Sep 29 12:05:43 2005
@@ -131,21 +131,9 @@
 
 static inline void __send_IPI_one(unsigned int cpu, int vector)
 {
-	unsigned int evtchn;
-
-	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
-	// printk("send_IPI_mask_bitmask cpu %d vector %d evtchn %d\n", cpu, vector, evtchn);
-	if (evtchn) {
-#if 0
-		shared_info_t *s = HYPERVISOR_shared_info;
-		while (synch_test_bit(evtchn, &s->evtchn_pending[0]) ||
-		       synch_test_bit(evtchn, &s->evtchn_mask[0]))
-			;
-#endif
-		notify_via_evtchn(evtchn);
-	} else
-		printk("send_IPI to unbound port %d/%d",
-		       cpu, vector);
+	int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
+	BUG_ON(evtchn < 0);
+	notify_via_evtchn(evtchn);
 }
 
 void __send_IPI_shortcut(unsigned int shortcut, int vector)
diff -r c317e0aca9f1 -r 805ee053e61f linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Thu Sep 29 10:10:27 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Thu Sep 29 12:05:43 2005
@@ -836,13 +836,6 @@
 	cpu_clear(smp_processor_id(), nohz_cpu_mask);
 }
 
-void time_suspend(void)
-{
-	/* nothing */
-	teardown_irq(per_cpu(timer_irq, 0), &irq_timer);
-	unbind_virq_from_irq(VIRQ_TIMER);
-}
-
 /* No locking required. We are only CPU running, and interrupts are off. */
 void time_resume(void)
 {
@@ -854,9 +847,6 @@
 	per_cpu(processed_system_time, 0) = processed_system_time;
 
 	update_wallclock();
-
-	per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER);
-	(void)setup_irq(per_cpu(timer_irq, 0), &irq_timer);
 }
 
 #ifdef CONFIG_SMP
diff -r c317e0aca9f1 -r 805ee053e61f linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Thu Sep 29 10:10:27 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Thu Sep 29 12:05:43 2005
@@ -78,22 +78,28 @@
 	 cpu_evtchn_mask[cpu][idx] &		\
 	 ~(sh)->evtchn_mask[idx])
 
-void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
 	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
 	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
 	cpu_evtchn[chn] = cpu;
 }
 
+static void init_evtchn_cpu_bindings(void)
+{
+	/* By default all event channels notify CPU#0. */
+	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
+	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+}
+
 #else
 
 #define active_evtchns(cpu,sh,idx)		\
 	((sh)->evtchn_pending[idx] &		\
 	 ~(sh)->evtchn_mask[idx])
-
-void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
-{
-}
+#define bind_evtchn_to_cpu(chn,cpu)	((void)0)
+#define init_evtchn_cpu_bindings()	((void)0)
+
 #endif
 
 /* Upcall to generic IRQ layer. */
@@ -244,7 +250,7 @@
 
 	spin_lock(&irq_mapping_update_lock);
 
-	if ((evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0) {
+	if ((evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == -1) {
 		op.cmd = EVTCHNOP_bind_ipi;
 		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
 		evtchn = op.u.bind_ipi.port;
@@ -287,7 +293,7 @@
 		bind_evtchn_to_cpu(evtchn, 0);
 		evtchn_to_irq[evtchn] = -1;
 		irq_to_evtchn[irq] = -1;
-		per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
+		per_cpu(ipi_to_evtchn, cpu)[ipi] = -1;
 	}
 
 	spin_unlock(&irq_mapping_update_lock);
@@ -608,41 +614,32 @@
 	synch_set_bit(evtchn, &s->evtchn_pending[0]);
 }
 
-void irq_suspend(void)
-{
-	int pirq, virq, irq, evtchn;
-	int cpu = smp_processor_id(); /* XXX */
-
-	/* Unbind VIRQs from event channels. */
-	for (virq = 0; virq < NR_VIRQS; virq++) {
-		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
-			continue;
-		evtchn = irq_to_evtchn[irq];
-
-		/* Mark the event channel as unused in our table. */
-		evtchn_to_irq[evtchn] = -1;
-		irq_to_evtchn[irq] = -1;
-	}
-
-	/* Check that no PIRQs are still bound. */
-	for (pirq = 0; pirq < NR_PIRQS; pirq++)
-		if ((evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1)
-			panic("Suspend attempted while PIRQ %d bound "
-			      "to evtchn %d.\n", pirq, evtchn);
-}
-
 void irq_resume(void)
 {
 	evtchn_op_t op;
-	int virq, irq, evtchn;
-	int cpu = smp_processor_id(); /* XXX */
+	int cpu, pirq, virq, ipi, irq, evtchn;
+
+	init_evtchn_cpu_bindings();
 
 	/* New event-channel space is not 'live' yet. */
 	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
 		mask_evtchn(evtchn);
 
+	/* Check that no PIRQs are still bound. */
+	for (pirq = 0; pirq < NR_PIRQS; pirq++)
+		BUG_ON(irq_to_evtchn[pirq_to_irq(pirq)] != -1);
+
+	/* Secondary CPUs must have no VIRQ or IPI bindings. */
+	for (cpu = 1; cpu < NR_CPUS; cpu++) {
+		for (virq = 0; virq < NR_VIRQS; virq++)
+			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
+		for (ipi = 0; ipi < NR_IPIS; ipi++)
+			BUG_ON(per_cpu(ipi_to_evtchn, cpu)[ipi] != -1);
+	}
+
+	/* Primary CPU: rebind VIRQs automatically. */
 	for (virq = 0; virq < NR_VIRQS; virq++) {
-		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
 			continue;
 
 		/* Get a new binding from Xen. */
@@ -652,13 +649,33 @@
 		evtchn = op.u.bind_virq.port;
 
 		/* Record the new mapping. */
-		bind_evtchn_to_cpu(evtchn, 0);
 		evtchn_to_irq[evtchn] = irq;
 		irq_to_evtchn[irq] = evtchn;
 
 		/* Ready for use. */
 		unmask_evtchn(evtchn);
 	}
+
+	/* Primary CPU: rebind IPIs automatically. */
+	for (ipi = 0; ipi < NR_IPIS; ipi++) {
+		if ((evtchn = per_cpu(ipi_to_evtchn, 0)[ipi]) == -1)
+			continue;
+
+		irq = evtchn_to_irq[evtchn];
+		evtchn_to_irq[evtchn] = -1;
+
+		/* Get a new binding from Xen. */
+		op.cmd = EVTCHNOP_bind_ipi;
+		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+		evtchn = op.u.bind_ipi.port;
+
+		/* Record the new mapping. */
+		evtchn_to_irq[evtchn] = irq;
+		irq_to_evtchn[irq] = evtchn;
+
+		/* Ready for use. */
+		unmask_evtchn(evtchn);
+	}
 }
 
 void __init init_IRQ(void)
@@ -670,15 +687,15 @@
 
 	spin_lock_init(&irq_mapping_update_lock);
 
-#ifdef CONFIG_SMP
-	/* By default all event channels notify CPU#0. */
-	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
-#endif
+	init_evtchn_cpu_bindings();
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		/* No VIRQ -> IRQ mappings. */
 		for (i = 0; i < NR_VIRQS; i++)
 			per_cpu(virq_to_irq, cpu)[i] = -1;
+		/* No VIRQ -> IRQ mappings. */
+		for (i = 0; i < NR_IPIS; i++)
+			per_cpu(ipi_to_evtchn, cpu)[i] = -1;
 	}
 
 	/* No event-channel -> IRQ mappings. */
diff -r c317e0aca9f1 -r 805ee053e61f linux-2.6-xen-sparse/arch/xen/kernel/reboot.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Thu Sep 29 10:10:27 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Thu Sep 29 12:05:43 2005
@@ -74,16 +74,12 @@
     extern int gnttab_suspend(void);
    extern int gnttab_resume(void);
 
-    extern void time_suspend(void);
     extern void time_resume(void);
     extern unsigned long max_pfn;
     extern unsigned long *pfn_to_mfn_frame_list_list;
     extern unsigned long *pfn_to_mfn_frame_list[];
 
 #ifdef CONFIG_SMP
-    extern void smp_suspend(void);
-    extern void smp_resume(void);
-
     static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
     cpumask_t prev_online_cpus, prev_present_cpus;
@@ -156,17 +152,9 @@
     kmem_cache_shrink(pgd_cache);
 #endif
 
-    time_suspend();
-
-#ifdef CONFIG_SMP
-    smp_suspend();
-#endif
-
     xenbus_suspend();
 
     xencons_suspend();
-
-    irq_suspend();
 
     gnttab_suspend();
@@ -212,10 +200,6 @@
     xenbus_resume();
 
-#ifdef CONFIG_SMP
-    smp_resume();
-#endif
-
     time_resume();
 
     usbif_resume();
diff -r c317e0aca9f1 -r 805ee053e61f linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c	Thu Sep 29 10:10:27 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c	Thu Sep 29 12:05:43 2005
@@ -31,14 +31,9 @@
 
 static inline void __send_IPI_one(unsigned int cpu, int vector)
 {
-	unsigned int evtchn;
-	Dprintk("%s\n", __FUNCTION__);
-
-	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
-	if (evtchn)
-		notify_via_evtchn(evtchn);
-	else
-		printk("send_IPI to unbound port %d/%d", cpu, vector);
+	int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
+	BUG_ON(evtchn < 0);
+	notify_via_evtchn(evtchn);
 }
 
 void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
diff -r c317e0aca9f1 -r 805ee053e61f linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	Thu Sep 29 10:10:27 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	Thu Sep 29 12:05:43 2005
@@ -80,11 +80,9 @@
  * the usable vector space is 0x20-0xff (224 vectors)
  */
 
-#define NR_IPIS 8
-
-#define RESCHEDULE_VECTOR	1
-#define INVALIDATE_TLB_VECTOR	2
-#define CALL_FUNCTION_VECTOR	3
+#define RESCHEDULE_VECTOR	0
+#define CALL_FUNCTION_VECTOR	1
+#define NR_IPIS			2
 
 /*
  * The maximum number of vectors supported by i386 processors
diff -r c317e0aca9f1 -r 805ee053e61f linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	Thu Sep 29 10:10:27 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	Thu Sep 29 12:05:43 2005
@@ -78,11 +78,9 @@
  * the usable vector space is 0x20-0xff (224 vectors)
  */
 
-#define NR_IPIS 8
-
-#define RESCHEDULE_VECTOR	1
-#define INVALIDATE_TLB_VECTOR	2
-#define CALL_FUNCTION_VECTOR	3
+#define RESCHEDULE_VECTOR	0
+#define CALL_FUNCTION_VECTOR	1
+#define NR_IPIS			2
 
 /*
  * The maximum number of vectors supported by i386 processors
diff -r c317e0aca9f1 -r 805ee053e61f linux-2.6-xen-sparse/include/asm-xen/evtchn.h
--- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Thu Sep 29 10:10:27 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Thu Sep 29 12:05:43 2005
@@ -68,7 +68,6 @@
 	void *dev_id);
 extern void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id);
 
-extern void irq_suspend(void);
 extern void irq_resume(void);
 
 /* Entry point for notifications into Linux subsystems. */
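The net effect of the evtchn.c change above is that irq_resume() now walks VCPU#0's VIRQ and IPI tables after a restore and obtains a fresh event-channel port for each live binding, which is why the suspend-side hooks (time_suspend(), smp_suspend(), irq_suspend()) disappear from reboot.c. Distilled from the irq_resume() hunk, the IPI half of that loop follows the pattern sketched below. This is a simplified, illustrative sketch only: rebind_ipi_on_cpu0() is not a function added by the patch (the real code runs inline in irq_resume()), and it assumes the declarations already present in evtchn.c (evtchn_op_t, evtchn_to_irq[], irq_to_evtchn[], per_cpu(ipi_to_evtchn, ...), unmask_evtchn()).

/*
 * Illustrative sketch, not part of the changeset: the equivalent logic
 * runs inline in irq_resume() in evtchn.c, once per IPI type on VCPU#0.
 */
static void rebind_ipi_on_cpu0(int ipi)
{
	evtchn_op_t op;
	int irq, evtchn;

	/* -1 is the new 'unbound' sentinel for ipi_to_evtchn (it was 0). */
	if ((evtchn = per_cpu(ipi_to_evtchn, 0)[ipi]) == -1)
		return;

	/* The pre-suspend port is gone; detach it from its IRQ. */
	irq = evtchn_to_irq[evtchn];
	evtchn_to_irq[evtchn] = -1;

	/* Ask Xen for a fresh IPI event-channel port. */
	op.cmd = EVTCHNOP_bind_ipi;
	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
	evtchn = op.u.bind_ipi.port;

	/* Record the new mapping and re-enable delivery. */
	evtchn_to_irq[evtchn] = irq;
	irq_to_evtchn[irq] = evtchn;
	unmask_evtchn(evtchn);
}

The VIRQ half is analogous, using EVTCHNOP_bind_virq and per_cpu(virq_to_irq, 0). Because only VCPU#0 is rebound automatically, irq_resume() BUG()s if any secondary CPU still holds a VIRQ or IPI binding at restore time.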
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog