[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v6 07/12] pvh: Send an SCI on VCPU hotplug event
Send an SCI when VCPU map is updated by domctl or when guest sets GPE0 enable bit and status bit is already set. Also update send_guest_global_virq() to handle cases when VCPU0 is offlined. Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> --- Changes in v6: * Change conditions causing the SCI to be generated: - domctl write to VCPU map - Enabling a pending GPE0 event xen/arch/x86/hvm/acpi.c | 20 ++++++++++++++++++++ xen/common/event_channel.c | 7 +++++-- xen/include/xen/domain.h | 1 + xen/include/xen/event.h | 8 ++++++++ 4 files changed, 34 insertions(+), 2 deletions(-) diff --git a/xen/arch/x86/hvm/acpi.c b/xen/arch/x86/hvm/acpi.c index 9f0578e..946640e 100644 --- a/xen/arch/x86/hvm/acpi.c +++ b/xen/arch/x86/hvm/acpi.c @@ -4,6 +4,7 @@ */ #include <xen/acpi.h> #include <xen/errno.h> +#include <xen/event.h> #include <xen/lib.h> #include <xen/sched.h> @@ -85,6 +86,17 @@ int hvm_acpi_domctl_access(struct domain *d, return -EFAULT; } + /* + * For simplicity don't verify whether CPU map changed and + * always send an SCI on a write (provided it's enabled). + */ + if ( is_write ) + { + d->arch.hvm_domain.acpi.gpe0_sts |= 1U << XEN_ACPI_GPE0_CPUHP_BIT; + if ( d->arch.hvm_domain.acpi.gpe0_en & (1U << XEN_ACPI_GPE0_CPUHP_BIT) ) + send_guest_global_virq(d, VIRQ_SCI); + } + return 0; } @@ -144,6 +156,7 @@ static int acpi_guest_access(int dir, unsigned int port, else { uint32_t v = *val; + uint16_t en_orig = *en; /* Status register is write-1-to-clear */ switch ( port & 3 ) @@ -170,6 +183,13 @@ static int acpi_guest_access(int dir, unsigned int port, *en = (((v & 0xff) << 8) | (*en & 0xff)) & *mask_en; break; } + + /* + * If an event became enabled and corresponding status bit is set + * then send an SCI to the guest.
+ */ + if ( (*en & ~en_orig) & *sts ) + send_guest_global_virq(d, VIRQ_SCI); } return X86EMUL_OKAY; diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c index 638dc5e..1d77373 100644 --- a/xen/common/event_channel.c +++ b/xen/common/event_channel.c @@ -727,7 +727,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq) spin_unlock_irqrestore(&v->virq_lock, flags); } -static void send_guest_global_virq(struct domain *d, uint32_t virq) +void send_guest_global_virq(struct domain *d, uint32_t virq) { unsigned long flags; int port; @@ -739,7 +739,10 @@ static void send_guest_global_virq(struct domain *d, uint32_t virq) if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) ) return; - v = d->vcpu[0]; + /* Send to first available VCPU */ + for_each_vcpu(d, v) + if ( is_vcpu_online(v) ) + break; if ( unlikely(v == NULL) ) return; diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h index bce0ea1..b386038 100644 --- a/xen/include/xen/domain.h +++ b/xen/include/xen/domain.h @@ -52,6 +52,7 @@ void vcpu_destroy(struct vcpu *v); int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset); void unmap_vcpu_info(struct vcpu *v); +int arch_update_avail_vcpus(struct domain *d); int arch_domain_create(struct domain *d, unsigned int domcr_flags, struct xen_arch_domainconfig *config); diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h index 5008c80..74bd605 100644 --- a/xen/include/xen/event.h +++ b/xen/include/xen/event.h @@ -23,6 +23,14 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq); /* + * send_guest_global_virq: Notify guest via a global VIRQ. + * @d: domain to which virtual IRQ should be sent. First + * online VCPU will be selected. + * @virq: Virtual IRQ number (VIRQ_*) + */ +void send_guest_global_virq(struct domain *d, uint32_t virq); + +/* * send_global_virq: Notify the domain handling a global VIRQ. 
* @virq: Virtual IRQ number (VIRQ_*) */ -- 2.7.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.