[Xen-devel] [PATCH v2 3/7] x86/hvm: convert gsi_assert_count into a variable size array
Rearrange the fields of hvm_irq so that gsi_assert_count can be
converted into a variable size array, and add a new field to account
for the number of GSIs. Due to these changes the irq member in the
hvm_domain struct also needs to become a pointer set at runtime.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           | 13 +++++++++++--
 xen/arch/x86/hvm/irq.c           | 27 +++++++++++++++++++++------
 xen/drivers/passthrough/io.c     |  8 ++++----
 xen/drivers/passthrough/pci.c    |  2 +-
 xen/include/asm-x86/domain.h     |  2 +-
 xen/include/asm-x86/hvm/domain.h |  2 +-
 xen/include/asm-x86/hvm/irq.h    | 28 +++++++++++++++-------------
 7 files changed, 54 insertions(+), 28 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0282986738..9a6cd9c9bf 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -457,7 +457,7 @@ void hvm_migrate_pirqs(struct vcpu *v)
 {
     struct domain *d = v->domain;
 
-    if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci )
+    if ( !iommu_enabled || !d->arch.hvm_domain.irq->dpci )
         return;
 
     spin_lock(&d->event_lock);
@@ -619,11 +619,16 @@ int hvm_domain_initialise(struct domain *d)
     d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
     d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler,
                                                   NR_IO_HANDLERS);
+    d->arch.hvm_domain.irq = xzalloc_bytes(hvm_irq_size(VIOAPIC_NUM_PINS));
+
     rc = -ENOMEM;
-    if ( !d->arch.hvm_domain.pl_time ||
+    if ( !d->arch.hvm_domain.pl_time || !d->arch.hvm_domain.irq ||
          !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
         goto fail1;
 
+    /* Set the default number of GSIs */
+    hvm_domain_irq(d)->nr_gsis = VIOAPIC_NUM_PINS;
+
     /* need link to containing domain */
     d->arch.hvm_domain.pl_time->domain = d;
 
@@ -680,6 +685,7 @@ int hvm_domain_initialise(struct domain *d)
     xfree(d->arch.hvm_domain.io_handler);
     xfree(d->arch.hvm_domain.params);
     xfree(d->arch.hvm_domain.pl_time);
+    xfree(d->arch.hvm_domain.irq);
 fail0:
     hvm_destroy_cacheattr_region_list(d);
     return rc;
@@ -722,6 +728,9 @@ void hvm_domain_destroy(struct domain *d)
 
     xfree(d->arch.hvm_domain.pl_time);
     d->arch.hvm_domain.pl_time = NULL;
+
+    xfree(d->arch.hvm_domain.irq);
+    d->arch.hvm_domain.irq = NULL;
 }
 
 static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index c2951ccf8a..6e67cae9bd 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -69,6 +69,7 @@ static void __hvm_pci_intx_assert(
         return;
 
     gsi = hvm_pci_intx_gsi(device, intx);
+    ASSERT(gsi < hvm_irq->nr_gsis);
     if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
         assert_gsi(d, gsi);
 
@@ -99,6 +100,7 @@ static void __hvm_pci_intx_deassert(
         return;
 
     gsi = hvm_pci_intx_gsi(device, intx);
+    ASSERT(gsi < hvm_irq->nr_gsis);
     --hvm_irq->gsi_assert_count[gsi];
 
     link    = hvm_pci_intx_link(device, intx);
@@ -122,7 +124,7 @@ void hvm_isa_irq_assert(
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
 
-    ASSERT(isa_irq <= 15);
+    ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis);
 
     spin_lock(&d->arch.hvm_domain.irq_lock);
 
@@ -139,7 +141,7 @@ void hvm_isa_irq_deassert(
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
 
-    ASSERT(isa_irq <= 15);
+    ASSERT(isa_irq <= 15 && isa_irq < hvm_irq->nr_gsis);
 
     spin_lock(&d->arch.hvm_domain.irq_lock);
 
@@ -363,7 +365,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
     {
     case HVMIRQ_callback_gsi:
         gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
-        if ( (gsi == 0) || (gsi >= ARRAY_SIZE(hvm_irq->gsi_assert_count)) )
+        if ( (gsi == 0) || (gsi >= hvm_irq->nr_gsis) )
             hvm_irq->callback_via_type = HVMIRQ_callback_none;
         else if ( hvm_irq->callback_via_asserted &&
                   (hvm_irq->gsi_assert_count[gsi]++ == 0) )
@@ -419,9 +421,9 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
     if ( unlikely(v->mce_pending) )
         return hvm_intack_mce;
 
-    if ( (plat->irq.callback_via_type == HVMIRQ_callback_vector)
+    if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector)
          && vcpu_info(v, evtchn_upcall_pending) )
-        return hvm_intack_vector(plat->irq.callback_via.vector);
+        return hvm_intack_vector(plat->irq->callback_via.vector);
 
     if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
         return hvm_intack_pic(0);
@@ -495,7 +497,7 @@ static void irq_dump(struct domain *d)
            (uint32_t) hvm_irq->isa_irq.pad[0],
            hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
            hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
-    for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 )
+    for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 )
         printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
                " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
                i, i+7,
@@ -507,6 +509,13 @@ static void irq_dump(struct domain *d)
                hvm_irq->gsi_assert_count[i+5],
                hvm_irq->gsi_assert_count[i+6],
                hvm_irq->gsi_assert_count[i+7]);
+    if ( i != hvm_irq->nr_gsis )
+    {
+        printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1);
+        for ( ; i < hvm_irq->nr_gsis; i++)
+            printk(" %2.2"PRIu8, hvm_irq->gsi_assert_count[i]);
+        printk("\n");
+    }
     printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
            hvm_irq->pci_link_assert_count[0],
            hvm_irq->pci_link_assert_count[1],
@@ -545,6 +554,9 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
     unsigned int asserted, pdev, pintx;
     int rc;
 
+    if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS )
+        return -EOPNOTSUPP;
+
     spin_lock(&d->arch.hvm_domain.irq_lock);
 
     pdev = hvm_irq->callback_via.pci.dev;
@@ -592,6 +604,9 @@ static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     int link, dev, intx, gsi;
 
+    if ( hvm_irq->nr_gsis != VIOAPIC_NUM_PINS )
+        return -EOPNOTSUPP;
+
     /* Load the PCI IRQ lines */
     if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 )
         return -EINVAL;
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 080183ea31..50e2f00214 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -195,7 +195,7 @@ struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *d)
     if ( !d || !is_hvm_domain(d) )
         return NULL;
 
-    return d->arch.hvm_domain.irq.dpci;
+    return d->arch.hvm_domain.irq->dpci;
 }
 
 void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
@@ -333,7 +333,7 @@ int pt_irq_create_bind(
         for ( i = 0; i < NR_HVM_IRQS; i++ )
             INIT_LIST_HEAD(&hvm_irq_dpci->girq[i]);
 
-        d->arch.hvm_domain.irq.dpci = hvm_irq_dpci;
+        d->arch.hvm_domain.irq->dpci = hvm_irq_dpci;
     }
 
     info = pirq_get_info(d, pirq);
@@ -788,7 +788,7 @@ static int _hvm_dpci_msi_eoi(struct domain *d,
 
 void hvm_dpci_msi_eoi(struct domain *d, int vector)
 {
-    if ( !iommu_enabled || !d->arch.hvm_domain.irq.dpci )
+    if ( !iommu_enabled || !d->arch.hvm_domain.irq->dpci )
         return;
 
     spin_lock(&d->event_lock);
@@ -798,7 +798,7 @@ void hvm_dpci_msi_eoi(struct domain *d, int vector)
 
 static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
 {
-    if ( unlikely(!d->arch.hvm_domain.irq.dpci) )
+    if ( unlikely(!d->arch.hvm_domain.irq->dpci) )
     {
         ASSERT_UNREACHABLE();
         return;
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index beddd42701..b5b865a2d4 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -815,7 +815,7 @@ static int pci_clean_dpci_irqs(struct domain *d)
             return ret;
         }
 
-        d->arch.hvm_domain.irq.dpci = NULL;
+        d->arch.hvm_domain.irq->dpci = NULL;
         free_hvm_irq_dpci(hvm_irq_dpci);
     }
     spin_unlock(&d->event_lock);
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index ec14cce81f..0b7e43fa16 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -17,7 +17,7 @@
 #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
 
 #define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
-        d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
+        d->arch.hvm_domain.irq->callback_via_type == HVMIRQ_callback_vector)
 #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
 #define is_domain_direct_mapped(d) ((void)(d), 0)
 
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 420cbdc609..c3cca94a97 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -125,7 +125,7 @@ struct hvm_domain {
 
     /* Lock protects access to irq, vpic and vioapic. */
     spinlock_t             irq_lock;
-    struct hvm_irq         irq;
+    struct hvm_irq        *irq;
     struct hvm_hw_vpic     vpic[2]; /* 0=master; 1=slave */
     struct hvm_vioapic    *vioapic;
     struct hvm_hw_stdvga   stdvga;
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
index 17a957d4b5..7d45293aed 100644
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -67,18 +67,6 @@ struct hvm_irq {
     u8 pci_link_assert_count[4];
 
     /*
-     * Number of wires asserting each GSI.
-     *
-     * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
-     * except ISA IRQ 0, which is connected to GSI 2.
-     * PCI links map into this space via the PCI-ISA bridge.
-     *
-     * GSIs 16+ are used only be PCI devices. The mapping from PCI device to
-     * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16
-     */
-    u8 gsi_assert_count[VIOAPIC_NUM_PINS];
-
-    /*
      * GSIs map onto PIC/IO-APIC in the usual way:
      *  0-7:  Master 8259 PIC, IO-APIC pins 0-7
      *  8-15: Slave  8259 PIC, IO-APIC pins 8-15
@@ -89,13 +77,27 @@ struct hvm_irq {
     u8 round_robin_prev_vcpu;
 
     struct hvm_irq_dpci *dpci;
+
+    /*
+     * Number of wires asserting each GSI.
+     *
+     * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
+     * except ISA IRQ 0, which is connected to GSI 2.
+     * PCI links map into this space via the PCI-ISA bridge.
+     *
+     * GSIs 16+ are used only be PCI devices. The mapping from PCI device to
+     * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16
+     */
+    unsigned int nr_gsis;
+    u8 gsi_assert_count[];
 };
 
 #define hvm_pci_intx_gsi(dev, intx)  \
     (((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16)
 #define hvm_pci_intx_link(dev, intx) \
     (((dev) + (intx)) & 3)
 
-#define hvm_domain_irq(d) (&(d)->arch.hvm_domain.irq)
+#define hvm_domain_irq(d) ((d)->arch.hvm_domain.irq)
+#define hvm_irq_size(cnt) offsetof(struct hvm_irq, gsi_assert_count[cnt])
 
 #define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2)
-- 
2.12.1
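
For reference, the allocation scheme used above is the standard C flexible-array-member pattern: the trailing array has to be the last field of the struct (which is why gsi_assert_count and its comment move below dpci), and the number of bytes to allocate is computed with offsetof() up to the last wanted element. Below is a minimal standalone sketch of that pattern, not Xen code: demo_irq, demo_irq_size() and the calloc() call are simplified stand-ins for struct hvm_irq, hvm_irq_size() and xzalloc_bytes().

/*
 * Standalone sketch (not Xen code) of the pattern the patch uses:
 * a flexible array member at the end of the struct, sized at run time
 * with an offsetof()-based macro.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_irq {
    unsigned int nr_gsis;        /* number of GSIs handled by this domain */
    uint8_t gsi_assert_count[];  /* flexible array member: must come last */
};

/*
 * Bytes needed for a struct holding cnt counters.  Mirrors hvm_irq_size();
 * with a non-constant cnt this relies on the GCC/Clang offsetof() extension
 * and equals offsetof(struct demo_irq, gsi_assert_count) + cnt.
 */
#define demo_irq_size(cnt) offsetof(struct demo_irq, gsi_assert_count[cnt])

int main(void)
{
    unsigned int nr_gsis = 48;   /* e.g. VIOAPIC_NUM_PINS */

    /* Zeroed, variable-size allocation, like xzalloc_bytes() in the patch. */
    struct demo_irq *irq = calloc(1, demo_irq_size(nr_gsis));

    if ( !irq )
        return EXIT_FAILURE;

    irq->nr_gsis = nr_gsis;
    irq->gsi_assert_count[2]++;  /* one wire asserting GSI 2 (ISA IRQ 0) */

    printf("allocated %zu bytes for %u GSIs\n",
           demo_irq_size(nr_gsis), irq->nr_gsis);

    free(irq);
    return EXIT_SUCCESS;
}

The same computation is what hvm_domain_initialise() performs when it calls xzalloc_bytes(hvm_irq_size(VIOAPIC_NUM_PINS)) and then stores the GSI count in nr_gsis.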