[Xen-devel] [PATCH] x86/IRQ: pass CPU masks by reference rather than by value in more places
Additionally simplify operations on them in a few cases.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- a/xen/arch/x86/genapic/delivery.c
+++ b/xen/arch/x86/genapic/delivery.c
@@ -26,19 +26,19 @@ void clustered_apic_check_flat(void)
     printk("Enabling APIC mode: Flat. Using %d I/O APICs\n", nr_ioapics);
 }

-cpumask_t target_cpus_flat(void)
+const cpumask_t *target_cpus_flat(void)
 {
-    return cpu_online_map;
+    return &cpu_online_map;
 }

-cpumask_t vector_allocation_cpumask_flat(int cpu)
+const cpumask_t *vector_allocation_cpumask_flat(int cpu)
 {
-    return cpu_online_map;
+    return &cpu_online_map;
 }

-unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask)
+unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask)
 {
-    return cpus_addr(cpumask)[0]&0xFF;
+    return cpus_addr(*cpumask)[0]&0xFF;
 }

 /*
@@ -59,18 +59,18 @@ void clustered_apic_check_phys(void)
     printk("Enabling APIC mode: Phys. Using %d I/O APICs\n", nr_ioapics);
 }

-cpumask_t target_cpus_phys(void)
+const cpumask_t *target_cpus_phys(void)
 {
-    return cpu_online_map;
+    return &cpu_online_map;
 }

-cpumask_t vector_allocation_cpumask_phys(int cpu)
+const cpumask_t *vector_allocation_cpumask_phys(int cpu)
 {
-    return cpumask_of_cpu(cpu);
+    return cpumask_of(cpu);
 }

-unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask)
+unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask)
 {
     /* As we are using single CPU as destination, pick only one CPU here */
-    return cpu_physical_id(first_cpu(cpumask));
+    return cpu_physical_id(cpumask_first(cpumask));
 }
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -89,24 +89,24 @@ void clustered_apic_check_x2apic(void)
     return;
 }

-cpumask_t target_cpus_x2apic(void)
+const cpumask_t *target_cpus_x2apic(void)
 {
-    return cpu_online_map;
+    return &cpu_online_map;
 }

-cpumask_t vector_allocation_cpumask_x2apic(int cpu)
+const cpumask_t *vector_allocation_cpumask_x2apic(int cpu)
 {
-    return cpumask_of_cpu(cpu);
+    return cpumask_of(cpu);
 }

-unsigned int cpu_mask_to_apicid_x2apic_phys(cpumask_t cpumask)
+unsigned int cpu_mask_to_apicid_x2apic_phys(const cpumask_t *cpumask)
 {
-    return cpu_physical_id(first_cpu(cpumask));
+    return cpu_physical_id(cpumask_first(cpumask));
 }

-unsigned int cpu_mask_to_apicid_x2apic_cluster(cpumask_t cpumask)
+unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask)
 {
-    return cpu_2_logical_apicid[first_cpu(cpumask)];
+    return cpu_2_logical_apicid[cpumask_first(cpumask)];
 }

 static void __send_IPI_mask_x2apic(
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -336,7 +336,7 @@ static void hpet_msi_set_affinity(unsign
     struct irq_desc * desc = irq_to_desc(irq);
     struct irq_cfg *cfg= desc->chip_data;

-    dest = set_desc_affinity(desc, mask);
+    dest = set_desc_affinity(desc, &mask);
     if (dest == BAD_APICID)
         return;
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -460,7 +460,7 @@ void irq_complete_move(struct irq_desc *
         send_cleanup_vector(cfg);
 }

-unsigned int set_desc_affinity(struct irq_desc *desc, cpumask_t mask)
+unsigned int set_desc_affinity(struct irq_desc *desc, const cpumask_t *mask)
 {
     struct irq_cfg *cfg;
     unsigned int irq;
@@ -468,7 +468,7 @@ unsigned int set_desc_affinity(struct ir
     unsigned long flags;
     cpumask_t dest_mask;

-    if (!cpus_intersects(mask, cpu_online_map))
+    if (!cpus_intersects(*mask, cpu_online_map))
         return BAD_APICID;

     irq = desc->irq;
@@ -483,15 +483,14 @@ unsigned int set_desc_affinity(struct ir
     if (ret < 0)
         return BAD_APICID;

-    cpus_copy(desc->affinity, mask);
-    cpus_and(dest_mask, desc->affinity, cfg->cpu_mask);
+    cpus_copy(desc->affinity, *mask);
+    cpus_and(dest_mask, *mask, cfg->cpu_mask);

-    return cpu_mask_to_apicid(dest_mask);
+    return cpu_mask_to_apicid(&dest_mask);
 }

 static void
-set_ioapic_affinity_irq_desc(struct irq_desc *desc,
-                             const struct cpumask mask)
+set_ioapic_affinity_irq_desc(struct irq_desc *desc, const cpumask_t *mask)
 {
     unsigned long flags;
     unsigned int dest;
@@ -536,7 +535,7 @@ set_ioapic_affinity_irq(unsigned int irq

     desc = irq_to_desc(irq);

-    set_ioapic_affinity_irq_desc(desc, mask);
+    set_ioapic_affinity_irq_desc(desc, &mask);
 }

 #endif /* CONFIG_SMP */
@@ -992,7 +991,7 @@ static void __init setup_IO_APIC_irqs(vo
         }
         cfg = irq_cfg(irq);
         SET_DEST(entry.dest.dest32, entry.dest.logical.logical_dest,
-                 cpu_mask_to_apicid(cfg->cpu_mask));
+                 cpu_mask_to_apicid(&cfg->cpu_mask));
         spin_lock_irqsave(&ioapic_lock, flags);
         io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
         io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
@@ -2434,7 +2434,7 @@ int ioapic_guest_write(unsigned long phy

     rte.vector = cfg->vector;
     SET_DEST(rte.dest.dest32, rte.dest.logical.logical_dest,
-             cpu_mask_to_apicid(cfg->cpu_mask));
+             cpu_mask_to_apicid(&cfg->cpu_mask));

     io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&rte) + 0));
     io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&rte) + 1));
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -330,7 +330,7 @@ hw_irq_controller no_irq_type = {

 atomic_t irq_err_count;

-int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+int __assign_irq_vector(int irq, struct irq_cfg *cfg, const cpumask_t *mask)
 {
     /*
      * NOTE! The local APIC isn't very good at handling
@@ -350,9 +350,8 @@ int __assign_irq_vector(int irq, struct

     old_vector = irq_to_vector(irq);
     if (old_vector) {
-        cpus_and(tmp_mask, mask, cpu_online_map);
-        cpus_and(tmp_mask, cfg->cpu_mask, tmp_mask);
-        if (!cpus_empty(tmp_mask)) {
+        cpus_and(tmp_mask, *mask, cpu_online_map);
+        if (cpus_intersects(tmp_mask, cfg->cpu_mask)) {
             cfg->vector = old_vector;
             return 0;
         }
@@ -361,16 +361,16 @@ int __assign_irq_vector(int irq, struct
     if ((cfg->move_in_progress) || cfg->move_cleanup_count)
         return -EAGAIN;

-    /* Only try and allocate irqs on cpus that are present */
-    cpus_and(mask, mask, cpu_online_map);
-    err = -ENOSPC;
-    for_each_cpu_mask(cpu, mask) {
+    for_each_cpu_mask(cpu, *mask) {
         int new_cpu;
         int vector, offset;

-        tmp_mask = vector_allocation_cpumask(cpu);
-        cpus_and(tmp_mask, tmp_mask, cpu_online_map);
+        /* Only try and allocate irqs on cpus that are present. */
+        if (!cpu_online(cpu))
+            continue;
+
+        cpus_and(tmp_mask, *vector_allocation_cpumask(cpu), cpu_online_map);

         vector = current_vector;
         offset = current_offset;
@@ -1747,14 +1746,14 @@ void fixup_irqs(void)
         spin_lock(&desc->lock);

         affinity = desc->affinity;
-        if ( !desc->action || cpus_equal(affinity, cpu_online_map) )
+        if ( !desc->action || cpus_subset(affinity, cpu_online_map) )
         {
             spin_unlock(&desc->lock);
             continue;
         }

         cpus_and(affinity, affinity, cpu_online_map);
-        if ( any_online_cpu(affinity) == NR_CPUS )
+        if ( cpus_empty(affinity) )
         {
             break_affinity = 1;
             affinity = cpu_online_map;
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -134,8 +134,7 @@ void msi_compose_msg(struct pci_dev *pde
     }

     if ( vector ) {
-
-        dest = cpu_mask_to_apicid(domain);
+        dest = cpu_mask_to_apicid(&domain);
         msg->address_hi = MSI_ADDR_BASE_HI;
         msg->address_lo =
@@ -275,7 +274,7 @@ void set_msi_affinity(unsigned int irq,
     struct msi_desc *msi_desc = desc->msi_desc;
     struct irq_cfg *cfg = desc->chip_data;

-    dest = set_desc_affinity(desc, mask);
+    dest = set_desc_affinity(desc, &mask);
     if (dest == BAD_APICID || !msi_desc)
         return;
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -356,7 +356,7 @@ static void iommu_msi_set_affinity(unsig
     u8 dev = PCI_SLOT(iommu->bdf & 0xff);
     u8 func = PCI_FUNC(iommu->bdf & 0xff);

-    dest = set_desc_affinity(desc, mask);
+    dest = set_desc_affinity(desc, &mask);
     if (dest == BAD_APICID){
         dprintk(XENLOG_ERR, "Set iommu interrupt affinity error!\n");
         return;
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1011,7 +1011,7 @@ static void dma_msi_set_affinity(unsigne
     struct irq_cfg *cfg = desc->chip_data;

 #ifdef CONFIG_X86
-    dest = set_desc_affinity(desc, mask);
+    dest = set_desc_affinity(desc, &mask);
     if (dest == BAD_APICID){
         dprintk(XENLOG_ERR VTDPREFIX, "Set iommu interrupt affinity error!\n");
         return;
--- a/xen/include/asm-x86/genapic.h
+++ b/xen/include/asm-x86/genapic.h
@@ -33,9 +33,9 @@ struct genapic {
     int int_dest_mode;
     void (*init_apic_ldr)(void);
     void (*clustered_apic_check)(void);
-    cpumask_t (*target_cpus)(void);
-    cpumask_t (*vector_allocation_cpumask)(int cpu);
-    unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+    const cpumask_t *(*target_cpus)(void);
+    const cpumask_t *(*vector_allocation_cpumask)(int cpu);
+    unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
     void (*send_IPI_mask)(const cpumask_t *mask, int vector);
     void (*send_IPI_self)(int vector);
 };
@@ -54,11 +54,11 @@ extern const struct genapic apic_x2apic_

 void init_apic_ldr_flat(void);
 void clustered_apic_check_flat(void);
-cpumask_t target_cpus_flat(void);
-unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask);
+const cpumask_t *target_cpus_flat(void);
+unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask);
 void send_IPI_mask_flat(const cpumask_t *mask, int vector);
 void send_IPI_self_flat(int vector);
-cpumask_t vector_allocation_cpumask_flat(int cpu);
+const cpumask_t *vector_allocation_cpumask_flat(int cpu);
 #define GENAPIC_FLAT \
     .int_delivery_mode = dest_LowestPrio, \
     .int_dest_mode = 1 /* logical delivery */, \
@@ -74,13 +74,13 @@ const struct genapic *apic_x2apic_probe(
 void init_apic_ldr_x2apic_phys(void);
 void init_apic_ldr_x2apic_cluster(void);
 void clustered_apic_check_x2apic(void);
-cpumask_t target_cpus_x2apic(void);
-unsigned int cpu_mask_to_apicid_x2apic_phys(cpumask_t cpumask);
-unsigned int cpu_mask_to_apicid_x2apic_cluster(cpumask_t cpumask);
+const cpumask_t *target_cpus_x2apic(void);
+unsigned int cpu_mask_to_apicid_x2apic_phys(const cpumask_t *cpumask);
+unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask);
 void send_IPI_mask_x2apic_phys(const cpumask_t *mask, int vector);
 void send_IPI_mask_x2apic_cluster(const cpumask_t *mask, int vector);
 void send_IPI_self_x2apic(int vector);
-cpumask_t vector_allocation_cpumask_x2apic(int cpu);
+const cpumask_t *vector_allocation_cpumask_x2apic(int cpu);
 #define GENAPIC_X2APIC_PHYS \
     .int_delivery_mode = dest_Fixed, \
     .int_dest_mode = 0 /* physical delivery */, \
@@ -105,11 +105,11 @@ cpumask_t vector_allocation_cpumask_x2ap

 void init_apic_ldr_phys(void);
 void clustered_apic_check_phys(void);
-cpumask_t target_cpus_phys(void);
-unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask);
+const cpumask_t *target_cpus_phys(void);
+unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask);
 void send_IPI_mask_phys(const cpumask_t *mask, int vector);
 void send_IPI_self_phys(int vector);
-cpumask_t vector_allocation_cpumask_phys(int cpu);
+const cpumask_t *vector_allocation_cpumask_phys(int cpu);
 #define GENAPIC_PHYS \
     .int_delivery_mode = dest_Fixed, \
     .int_dest_mode = 0 /* physical delivery */, \
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -138,7 +138,7 @@ void unlock_vector_lock(void);
 void move_native_irq(int irq);
 void move_masked_irq(int irq);

-int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
+int __assign_irq_vector(int irq, struct irq_cfg *, const cpumask_t *);

 int bind_irq_vector(int irq, int vector, cpumask_t domain);
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -143,16 +143,16 @@ extern void pirq_set_affinity(struct dom
 extern irq_desc_t *domain_spin_lock_irq_desc(
     struct domain *d, int irq, unsigned long *pflags);

-static inline void set_native_irq_info(unsigned int irq, cpumask_t mask)
+static inline void set_native_irq_info(unsigned int irq, const cpumask_t *mask)
 {
-    irq_desc[irq].affinity = mask;
+    irq_desc[irq].affinity = *mask;
 }

 static inline void set_irq_info(int irq, cpumask_t mask)
 {
-    set_native_irq_info(irq, mask);
+    set_native_irq_info(irq, &mask);
 }

-unsigned int set_desc_affinity(struct irq_desc *desc, cpumask_t mask);
+unsigned int set_desc_affinity(struct irq_desc *, const cpumask_t *);

 #endif /* __XEN_IRQ_H__ */
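For readers unfamiliar with the motivation: a cpumask_t is a bitmap sized for NR_CPUS, so passing it by value copies the whole bitmap onto the stack on every call, while passing a const pointer copies only a pointer and lets callees take the address of an existing mask (e.g. &cpu_online_map) without any copy. The following is a minimal, standalone C sketch of the two calling conventions; it is not Xen code, and the simplified cpumask_t, the NR_CPUS value and the helper names first_cpu_byval()/first_cpu_byref() are made up purely for illustration.

#include <stdio.h>
#include <string.h>

#define NR_CPUS 256                      /* made-up value for this sketch */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Simplified stand-in for Xen's cpumask_t: a fixed-size bitmap. */
typedef struct {
    unsigned long bits[NR_CPUS / BITS_PER_LONG];
} cpumask_t;

static int mask_test_bit(const cpumask_t *m, unsigned int cpu)
{
    return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}

/* By value: the caller copies the entire bitmap onto the stack. */
static unsigned int first_cpu_byval(cpumask_t mask)
{
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        if (mask_test_bit(&mask, cpu))
            return cpu;
    return NR_CPUS;
}

/* By reference: only a pointer is passed, and the mask stays read-only. */
static unsigned int first_cpu_byref(const cpumask_t *mask)
{
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        if (mask_test_bit(mask, cpu))
            return cpu;
    return NR_CPUS;
}

int main(void)
{
    cpumask_t online;

    memset(&online, 0, sizeof(online));
    online.bits[0] = 1UL << 3;           /* pretend only CPU 3 is online */

    /* Same result either way, but the first call copies sizeof(cpumask_t)
     * bytes (32 here) while the second copies just a pointer. */
    printf("by value:     %u\n", first_cpu_byval(online));
    printf("by reference: %u\n", first_cpu_byref(&online));
    return 0;
}

With NR_CPUS at 256 the by-value call copies 32 bytes per invocation, and larger NR_CPUS configurations make the copy correspondingly bigger, which is presumably why the patch converts these internal interfaces to take const cpumask_t * instead.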
Attachment: x86-irq-cpumask.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel