
[Xen-devel] [PATCH v4 08/15] pvh/acpi: Handle ACPI accesses for PVH guests



Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
Changes in v4:
* Registers are now explicitly split into 2-byte status and enable.
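
As a rough illustration of that split, here is a standalone sketch (not Xen
code; the real fields live in d->arch.hvm_domain.acpi and are introduced
earlier in the series) of how the read path below assembles a value from the
2-byte status and 2-byte enable halves:

    /* Standalone sketch of a read over split 2-byte status/enable registers. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read_evt_block(uint16_t sts, uint16_t en,
                                   unsigned int offset, unsigned int bytes)
    {
        /* sts occupies bytes 0-1 of the block, en bytes 2-3. */
        uint32_t data = ((uint32_t)en << 16) | sts;
        uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;

        data >>= 8 * (offset & 3);  /* shift the first accessed byte to bit 0 */
        return data & ~mask;        /* keep only the requested width */
    }

    int main(void)
    {
        /* A 2-byte read at offset 2 returns the enable register: prints 0x120. */
        printf("%#x\n", read_evt_block(0x0020, 0x0120, 2, 2));
        return 0;
    }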

 xen/arch/x86/hvm/acpi.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++-
 xen/common/domctl.c     |   5 ++
 xen/include/xen/sched.h |   3 ++
 3 files changed, 129 insertions(+), 1 deletion(-)
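
The status halves are write-1-to-clear for the guest; a minimal standalone
sketch of that behaviour (again illustrative only, values made up):

    /* Standalone sketch: guest writes of 1 clear status bits, limited to
     * the bits Xen actually implements. */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t write1_to_clear(uint16_t sts, uint8_t v, uint16_t mask)
    {
        sts &= ~(uint16_t)v;   /* each 1 written clears the matching status bit */
        return sts & mask;     /* unimplemented bits never stay set */
    }

    int main(void)
    {
        /* Clearing bit 0 of a status word with bits 0 and 5 set: prints 0x20. */
        printf("%#x\n", write1_to_clear(0x0021, 0x01, 0x00ff));
        return 0;
    }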

diff --git a/xen/arch/x86/hvm/acpi.c b/xen/arch/x86/hvm/acpi.c
index a1f7fd2..c80c464 100644
--- a/xen/arch/x86/hvm/acpi.c
+++ b/xen/arch/x86/hvm/acpi.c
@@ -2,12 +2,129 @@
  *
  * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
  */
+#include <xen/acpi.h>
 #include <xen/errno.h>
 #include <xen/lib.h>
 #include <xen/sched.h>
 
 #include <public/arch-x86/xen.h>
 
+static int acpi_access_common(struct domain *d,
+                              int dir, unsigned int port,
+                              unsigned int bytes, uint32_t *val)
+{
+    uint16_t *sts = NULL, *en = NULL;
+    const uint16_t *mask_sts = NULL, *mask_en = NULL;
+    static const uint16_t pm1a_sts_mask = ACPI_BITMASK_GLOBAL_LOCK_STATUS;
+    static const uint16_t pm1a_en_mask = ACPI_BITMASK_GLOBAL_LOCK_ENABLE;
+    static const uint16_t gpe0_sts_mask = 1U << XEN_GPE0_CPUHP_BIT;
+    static const uint16_t gpe0_en_mask = 1U << XEN_GPE0_CPUHP_BIT;
+
+    BUILD_BUG_ON(XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN
+                 >= ACPI_GPE0_BLK_ADDRESS_V1);
+
+    ASSERT(!has_acpi_dm_ff(d));
+
+    switch ( port )
+    {
+    case ACPI_PM1A_EVT_BLK_ADDRESS_V1 ...
+        ACPI_PM1A_EVT_BLK_ADDRESS_V1 +
+        sizeof(d->arch.hvm_domain.acpi.pm1a_sts) +
+        sizeof(d->arch.hvm_domain.acpi.pm1a_en) - 1:
+
+        sts = &d->arch.hvm_domain.acpi.pm1a_sts;
+        en = &d->arch.hvm_domain.acpi.pm1a_en;
+        mask_sts = &pm1a_sts_mask;
+        mask_en = &pm1a_en_mask;
+        break;
+
+    case ACPI_GPE0_BLK_ADDRESS_V1 ...
+        ACPI_GPE0_BLK_ADDRESS_V1 +
+        sizeof(d->arch.hvm_domain.acpi.gpe0_sts) +
+        sizeof(d->arch.hvm_domain.acpi.gpe0_en) - 1:
+
+        sts = &d->arch.hvm_domain.acpi.gpe0_sts;
+        en = &d->arch.hvm_domain.acpi.gpe0_en;
+        mask_sts = &gpe0_sts_mask;
+        mask_en = &gpe0_en_mask;
+        break;
+
+    case XEN_ACPI_CPU_MAP ...
+         XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN - 1:
+        break;
+
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    if ( dir == XEN_DOMCTL_ACPI_READ )
+    {
+        uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;
+
+        if ( !mask_sts )
+        {
+            unsigned int first_byte = port - XEN_ACPI_CPU_MAP;
+
+            /*
+             * Clear the bytes we are about to read into, in case we
+             * end up copying fewer than @bytes below.
+             */
+            *val &= mask;
+
+            if ( ((d->max_vcpus + 7) / 8) > first_byte )
+            {
+                memcpy(val, (uint8_t *)d->avail_vcpus + first_byte,
+                       min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
+            }
+        }
+        else
+        {
+            uint32_t data = (((uint32_t)*en) << 16) | *sts;
+            data >>= 8 * (port & 3);
+            *val = (*val & mask) | (data & ~mask);
+        }
+    }
+    else
+    {
+        /* Guests may not write the CPU map. */
+        if ( !mask_sts )
+            return X86EMUL_UNHANDLEABLE;
+        else
+        {
+            uint32_t v = *val;
+
+            /* Status register is write-1-to-clear by guests */
+            switch ( port & 3 )
+            {
+            case 0:
+                *sts &= ~(v & 0xff);
+                *sts &= *mask_sts;
+                if ( !--bytes )
+                    break;
+                v >>= 8;      /* fall through */
+
+            case 1:
+                *sts &= ~((v & 0xff) << 8);
+                *sts &= *mask_sts;
+                if ( !--bytes )
+                    break;
+                v >>= 8;      /* fall through */
+
+            case 2:
+                *en = ((*en & 0xff00) | (v & 0xff)) & *mask_en;
+                if ( !--bytes )
+                    break;
+                v >>= 8;      /* fall through */
+
+            case 3:
+                *en = (((v & 0xff) << 8) | (*en & 0xff)) & *mask_en;
+            }
+        }
+    }
+
+    return X86EMUL_OKAY;
+}
+
 int hvm_acpi_domctl_access(struct domain *d, uint8_t rw, gas_t *gas,
                            XEN_GUEST_HANDLE_PARAM(uint8) arg)
 {
@@ -17,7 +134,10 @@ int hvm_acpi_domctl_access(struct domain *d, uint8_t rw, gas_t *gas,
 static int acpi_guest_access(int dir, unsigned int port,
                              unsigned int bytes, uint32_t *val)
 {
-    return X86EMUL_UNHANDLEABLE;
+    return acpi_access_common(current->domain,
+                              (dir == IOREQ_READ) ?
+                              XEN_DOMCTL_ACPI_READ : XEN_DOMCTL_ACPI_WRITE,
+                              port, bytes, val);
 }
 
 void hvm_acpi_init(struct domain *d)
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index b0ee961..0a08b83 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -651,6 +651,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                 goto maxvcpu_out;
         }
 
+        d->avail_vcpus = xzalloc_array(unsigned long,
+                                       BITS_TO_LONGS(d->max_vcpus));
+        if ( !d->avail_vcpus )
+            goto maxvcpu_out;
+
         ret = 0;
 
     maxvcpu_out:
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 1fbda87..3ef9c9e 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -315,6 +315,9 @@ struct domain
     unsigned int     max_vcpus;
     struct vcpu    **vcpu;
 
+    /* Bitmap of available VCPUs. */
+    unsigned long   *avail_vcpus;
+
     shared_info_t   *shared_info;     /* shared data area */
 
     spinlock_t       domain_lock;
-- 
2.7.4

