[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v6 04/12] pvh/acpi: Handle ACPI accesses for PVH guests
Subsequent domctl access VCPU map will use the same code. We create acpi_cpumap_access_common() routines in anticipation of these changes. Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> --- Changes in v6: * ACPI registers are only accessed by guest code (not by domctl), thus acpi_access_common() is no longer needed * Adjusted access direction (RW) to be a boolean. * Dropped unnecessary masking of status register xen/arch/x86/hvm/acpi.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++- xen/common/domain.c | 1 + xen/common/domctl.c | 5 +++ xen/include/xen/sched.h | 3 ++ 4 files changed, 117 insertions(+), 2 deletions(-) diff --git a/xen/arch/x86/hvm/acpi.c b/xen/arch/x86/hvm/acpi.c index 15a9a0e..f0a84f9 100644 --- a/xen/arch/x86/hvm/acpi.c +++ b/xen/arch/x86/hvm/acpi.c @@ -2,12 +2,43 @@ * * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved. */ +#include <xen/acpi.h> #include <xen/errno.h> #include <xen/lib.h> #include <xen/sched.h> #include <public/arch-x86/xen.h> +static int acpi_cpumap_access_common(struct domain *d, bool is_write, + unsigned int port, + unsigned int bytes, uint32_t *val) +{ + unsigned int first_byte = port - XEN_ACPI_CPU_MAP; + + BUILD_BUG_ON(XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN + > ACPI_GPE0_BLK_ADDRESS_V1); + + if ( !is_write ) + { + uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0; + + /* + * Clear bits that we are about to read to in case we + * copy fewer than @bytes. 
+ */ + *val &= mask; + + if ( ((d->max_vcpus + 7) / 8) > first_byte ) + memcpy(val, (uint8_t *)d->avail_vcpus + first_byte, + min(bytes, ((d->max_vcpus + 7) / 8) - first_byte)); + } + else + /* Guests do not write CPU map */ + return X86EMUL_UNHANDLEABLE; + + return X86EMUL_OKAY; +} + int hvm_acpi_domctl_access(struct domain *d, const struct xen_domctl_acpi_access *access) { @@ -17,13 +48,88 @@ int hvm_acpi_domctl_access(struct domain *d, static int acpi_cpumap_guest_access(int dir, unsigned int port, unsigned int bytes, uint32_t *val) { - return X86EMUL_UNHANDLEABLE; + return acpi_cpumap_access_common(current->domain, + (dir == IOREQ_WRITE) ? true : false, + port, bytes, val); } static int acpi_guest_access(int dir, unsigned int port, unsigned int bytes, uint32_t *val) { - return X86EMUL_UNHANDLEABLE; + struct domain *d = current->domain; + uint16_t *sts = NULL, *en = NULL; + const uint16_t *mask_en = NULL; + static const uint16_t pm1a_en_mask = ACPI_BITMASK_GLOBAL_LOCK_ENABLE; + static const uint16_t gpe0_en_mask = 1U << XEN_ACPI_GPE0_CPUHP_BIT; + + ASSERT(!has_acpi_dm_ff(d)); + + switch ( port ) + { + case ACPI_PM1A_EVT_BLK_ADDRESS_V1 ... + ACPI_PM1A_EVT_BLK_ADDRESS_V1 + + sizeof(d->arch.hvm_domain.acpi.pm1a_sts) + + sizeof(d->arch.hvm_domain.acpi.pm1a_en): + + sts = &d->arch.hvm_domain.acpi.pm1a_sts; + en = &d->arch.hvm_domain.acpi.pm1a_en; + mask_en = &pm1a_en_mask; + break; + + case ACPI_GPE0_BLK_ADDRESS_V1 ... + ACPI_GPE0_BLK_ADDRESS_V1 + + sizeof(d->arch.hvm_domain.acpi.gpe0_sts) + + sizeof(d->arch.hvm_domain.acpi.gpe0_en): + + sts = &d->arch.hvm_domain.acpi.gpe0_sts; + en = &d->arch.hvm_domain.acpi.gpe0_en; + mask_en = &gpe0_en_mask; + break; + + default: + return X86EMUL_UNHANDLEABLE; + } + + if ( dir == IOREQ_READ ) + { + uint32_t mask = (bytes < 4) ? 
~0U << (bytes * 8) : 0; + uint32_t data = (((uint32_t)*en) << 16) | *sts; + + data >>= 8 * (port & 3); + *val = (*val & mask) | (data & ~mask); + } + else + { + uint32_t v = *val; + + /* Status register is write-1-to-clear */ + switch ( port & 3 ) + { + case 0: + *sts &= ~(v & 0xff); + if ( !--bytes ) + break; + v >>= 8; + /* fallthrough */ + case 1: + *sts &= ~((v & 0xff) << 8); + if ( !--bytes ) + break; + v >>= 8; + /* fallthrough */ + case 2: + *en = ((*en & 0xff00) | (v & 0xff)) & *mask_en; + if ( !--bytes ) + break; + v >>= 8; + /* fallthrough */ + case 3: + *en = (((v & 0xff) << 8) | (*en & 0xff)) & *mask_en; + break; + } + } + + return X86EMUL_OKAY; } void hvm_acpi_init(struct domain *d) diff --git a/xen/common/domain.c b/xen/common/domain.c index 05130e2..ca1f0ed 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -847,6 +847,7 @@ static void complete_domain_destroy(struct rcu_head *head) xsm_free_security_domain(d); free_cpumask_var(d->domain_dirty_cpumask); xfree(d->vcpu); + xfree(d->avail_vcpus); free_domain_struct(d); send_global_virq(VIRQ_DOM_EXC); diff --git a/xen/common/domctl.c b/xen/common/domctl.c index b0ee961..0a08b83 100644 --- a/xen/common/domctl.c +++ b/xen/common/domctl.c @@ -651,6 +651,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) goto maxvcpu_out; } + d->avail_vcpus = xzalloc_array(unsigned long, + BITS_TO_LONGS(d->max_vcpus)); + if ( !d->avail_vcpus ) + goto maxvcpu_out; + ret = 0; maxvcpu_out: diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h index 063efe6..bee190f 100644 --- a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -315,6 +315,9 @@ struct domain unsigned int max_vcpus; struct vcpu **vcpu; + /* Bitmap of available VCPUs. */ + unsigned long *avail_vcpus; + shared_info_t *shared_info; /* shared data area */ spinlock_t domain_lock; -- 2.7.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx https://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.