[Xen-devel] [PATCH v2 7/7] x86/vioapic: allow PVHv2 Dom0 to have more than one IO APIC
The base address, id and number of pins of the vIO APICs exposed to PVHv2
Dom0 are the same as the values found on bare metal.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
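Illustrative note (not part of the patch): the standalone sketch below
mirrors the GSI accounting this change introduces. For the hardware
domain one vIO APIC is instantiated per physical IO APIC and the number
of GSIs is the sum of their pin counts (matching the ASSERT() added to
vioapic_init()), while unprivileged guests keep a single vIO APIC with
the default pin count. The two 24-pin IO APICs are made-up example
values, as is the 48-pin guest default.

/* Standalone sketch; compile with any C99 compiler. */
#include <stdio.h>

#define VIOAPIC_NUM_PINS 48              /* assumed default guest pin count */

int main(void)
{
    /* Hypothetical bare-metal topology: two IO APICs, 24 pins each. */
    unsigned int nr_ioapic_entries[] = { 24, 24 };
    unsigned int nr_ioapics = 2;
    unsigned int nr_gsis = 0, i;

    /* Hardware domain: GSIs are the sum of all IO APIC pins. */
    for ( i = 0; i < nr_ioapics; i++ )
        nr_gsis += nr_ioapic_entries[i];

    printf("Dom0: %u vIO APICs, %u GSIs\n", nr_ioapics, nr_gsis);
    /* Plain guests: a single vIO APIC with the default pin count. */
    printf("DomU: 1 vIO APIC, %u GSIs\n", VIOAPIC_NUM_PINS);

    return 0;
}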
 xen/arch/x86/hvm/dom0_build.c | 33 ++++++++++++---------------------
 xen/arch/x86/hvm/hvm.c        |  8 +++++---
 xen/arch/x86/hvm/vioapic.c    | 30 ++++++++++++++++++++++++------
 3 files changed, 41 insertions(+), 30 deletions(-)

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index daa791d3f4..db9be87612 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -681,12 +681,7 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
     max_vcpus = dom0_max_vcpus();
     /* Calculate the size of the crafted MADT. */
     size = sizeof(*madt);
-    /*
-     * FIXME: the current vIO-APIC code just supports one IO-APIC instance
-     * per domain. This must be fixed in order to provide the same amount of
-     * IO APICs as available on bare metal.
-     */
-    size += sizeof(*io_apic);
+    size += sizeof(*io_apic) * nr_ioapics;
     size += sizeof(*intsrcovr) * acpi_intr_overrides;
     size += sizeof(*nmisrc) * acpi_nmi_sources;
     size += sizeof(*x2apic) * max_vcpus;
@@ -716,23 +711,19 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
      */
     madt->header.revision = min_t(unsigned char, table->revision, 4);
 
-    /*
-     * Setup the IO APIC entry.
-     * FIXME: the current vIO-APIC code just supports one IO-APIC instance
-     * per domain. This must be fixed in order to provide the same amount of
-     * IO APICs as available on bare metal, and with the same IDs as found in
-     * the native IO APIC MADT entries.
-     */
-    if ( nr_ioapics > 1 )
-        printk("WARNING: found %d IO APICs, Dom0 will only have access to 1 emulated IO APIC\n",
-               nr_ioapics);
+    /* Setup the IO APIC entries. */
     io_apic = (void *)(madt + 1);
-    io_apic->header.type = ACPI_MADT_TYPE_IO_APIC;
-    io_apic->header.length = sizeof(*io_apic);
-    io_apic->id = domain_vioapic(d, 0)->id;
-    io_apic->address = VIOAPIC_DEFAULT_BASE_ADDRESS;
+    for ( i = 0; i < nr_ioapics; i++ )
+    {
+        io_apic->header.type = ACPI_MADT_TYPE_IO_APIC;
+        io_apic->header.length = sizeof(*io_apic);
+        io_apic->id = domain_vioapic(d, i)->id;
+        io_apic->address = domain_vioapic(d, i)->base_address;
+        io_apic->global_irq_base = io_apic_gsi_base(i);
+        io_apic++;
+    }
 
-    x2apic = (void *)(io_apic + 1);
+    x2apic = (void *)io_apic;
     for ( i = 0; i < max_vcpus; i++ )
     {
         x2apic->header.type = ACPI_MADT_TYPE_LOCAL_X2APIC;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9a6cd9c9bf..322b3b8235 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -595,6 +595,7 @@ static int hvm_print_line(
 
 int hvm_domain_initialise(struct domain *d)
 {
+    unsigned int nr_gsis;
     int rc;
 
     if ( !hvm_enabled )
@@ -615,19 +616,20 @@ int hvm_domain_initialise(struct domain *d)
     if ( rc != 0 )
         goto fail0;
 
+    nr_gsis = is_hardware_domain(d) ? nr_irqs_gsi : VIOAPIC_NUM_PINS;
     d->arch.hvm_domain.pl_time = xzalloc(struct pl_time);
     d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
     d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler,
                                                   NR_IO_HANDLERS);
-    d->arch.hvm_domain.irq = xzalloc_bytes(hvm_irq_size(VIOAPIC_NUM_PINS));
+    d->arch.hvm_domain.irq = xzalloc_bytes(hvm_irq_size(nr_gsis));
 
     rc = -ENOMEM;
     if ( !d->arch.hvm_domain.pl_time || !d->arch.hvm_domain.irq ||
          !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
         goto fail1;
 
-    /* Set the default number of GSIs */
-    hvm_domain_irq(d)->nr_gsis = VIOAPIC_NUM_PINS;
+    /* Set the number of GSIs */
+    hvm_domain_irq(d)->nr_gsis = nr_gsis;
 
     /* need link to containing domain */
     d->arch.hvm_domain.pl_time->domain = d;
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 327a9758e0..c349a3ee61 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -533,9 +533,19 @@ void vioapic_reset(struct domain *d)
         memset(vioapic, 0, hvm_vioapic_size(nr_pins));
         for ( j = 0; j < nr_pins; j++ )
             vioapic->redirtbl[j].fields.mask = 1;
-        vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS +
-                                VIOAPIC_MEM_LENGTH * i;
-        vioapic->id = i;
+
+        if ( !is_hardware_domain(d) )
+        {
+            vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS +
+                                    VIOAPIC_MEM_LENGTH * i;
+            vioapic->id = i;
+        }
+        else
+        {
+            vioapic->base_address = mp_ioapics[i].mpc_apicaddr;
+            vioapic->id = mp_ioapics[i].mpc_apicid;
+        }
+
         vioapic->nr_pins = nr_pins;
         vioapic->domain = d;
     }
@@ -556,7 +566,7 @@ static void vioapic_free(const struct domain *d, unsigned int nr_vioapics)
 
 int vioapic_init(struct domain *d)
 {
-    unsigned int i, nr_vioapics = 1;
+    unsigned int i, nr_vioapics, nr_gsis = 0;
 
     if ( !has_vioapic(d) )
     {
@@ -564,6 +574,8 @@ int vioapic_init(struct domain *d)
         return 0;
     }
 
+    nr_vioapics = is_hardware_domain(d) ? nr_ioapics : 1;
+
     if ( (d->arch.hvm_domain.vioapic == NULL) &&
          ((d->arch.hvm_domain.vioapic = xzalloc_array(struct hvm_vioapic *,
                                                       nr_vioapics)) == NULL) )
@@ -571,15 +583,21 @@ int vioapic_init(struct domain *d)
 
     for ( i = 0; i < nr_vioapics; i++ )
     {
+        unsigned int nr_pins = is_hardware_domain(d) ? nr_ioapic_entries[i]
+                                                     : VIOAPIC_NUM_PINS;
+
         if ( (domain_vioapic(d, i) =
-              xmalloc_bytes(hvm_vioapic_size(VIOAPIC_NUM_PINS))) == NULL )
+              xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL )
         {
             vioapic_free(d, nr_vioapics);
             return -ENOMEM;
         }
-        domain_vioapic(d, i)->nr_pins = VIOAPIC_NUM_PINS;
+        domain_vioapic(d, i)->nr_pins = nr_pins;
+        nr_gsis += nr_pins;
     }
 
+    ASSERT(hvm_domain_irq(d)->nr_gsis == nr_gsis);
+
     d->arch.hvm_domain.nr_vioapics = nr_vioapics;
     vioapic_reset(d);
 
-- 
2.12.1