[Xen-devel] [PATCH v3 08/11] pvh/acpi: Handle ACPI accesses for PVH guests

PVH guests do not use a device model, so the hypervisor itself must handle
their ACPI accesses. Emulate guest reads and writes of the PM1A event
block, the GPE0 block and the CPU availability map.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Changes in v3:
* Introduce masks for pm1a and gpe0 listing the bits that a guest may
  operate on (illustrated by the sketch after this changelog).
* Lots of small changes.
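
To illustrate how such a mask confines guest writes, here is a minimal
standalone sketch. demo_reg_write and the literal bit values are made-up
stand-ins for the real handler, and XEN_GPE0_CPUHP_BIT is assumed to be
bit 2 purely for illustration:

    /* Standalone demo of mask-filtered register writes (hypothetical names). */
    #include <stdint.h>
    #include <stdio.h>

    static void demo_reg_write(uint8_t reg[4], const uint8_t mask[4],
                               unsigned int idx, const uint8_t *src,
                               unsigned int bytes)
    {
        unsigned int i;

        for ( i = 0; i < bytes && idx < 4; i++, idx++ )
        {
            if ( idx < 2 )      /* status bytes: write 1 to clear */
                reg[idx] &= ~(mask[idx] & src[i]);
            else                /* enable bytes: plain set */
                reg[idx] |= (mask[idx] & src[i]);
        }
    }

    int main(void)
    {
        /* GPE0-like block: CPU-hotplug status bit (bit 2 here) pending. */
        uint8_t reg[4] = { 0x04, 0, 0, 0 };
        const uint8_t mask[4] = { 0x04, 0, 0x04, 0 };
        const uint8_t ack = 0xff;   /* guest writes all ones to the status byte */

        demo_reg_write(reg, mask, 0, &ack, 1);
        printf("status: %#x\n", (unsigned)reg[0]); /* 0: only the masked bit cleared */
        return 0;
    }

Bits absent from the mask are dropped, so a guest writing 0xff can only
affect the bits the hypervisor has chosen to expose.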

 xen/arch/x86/hvm/ioreq.c         | 92 +++++++++++++++++++++++++++++++++++++++-
 xen/include/asm-x86/hvm/domain.h |  6 +++
 2 files changed, 97 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 51bb399..4ab0d0a 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -16,6 +16,7 @@
  * this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <xen/acpi.h>
 #include <xen/config.h>
 #include <xen/ctype.h>
 #include <xen/init.h>
@@ -1383,7 +1384,96 @@ static int hvm_access_cf8(
 static int acpi_ioaccess(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
-    return X86EMUL_UNHANDLEABLE;
+    uint8_t *reg = NULL;
+    const uint8_t *mask = NULL;
+    bool is_cpu_map = false;
+    struct domain *currd = current->domain;
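+    /* Bits a guest may operate on; writes to all other bits are dropped. */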
+    static const uint8_t pm1a_mask[4] = {ACPI_BITMASK_GLOBAL_LOCK_STATUS, 0,
+                                         ACPI_BITMASK_GLOBAL_LOCK_ENABLE, 0};
+    static const uint8_t gpe0_mask[4] = {1U << XEN_GPE0_CPUHP_BIT, 0,
+                                         1U << XEN_GPE0_CPUHP_BIT, 0};
+
+    BUILD_BUG_ON((ACPI_PM1A_EVT_BLK_LEN != 4) ||
+                 (ACPI_GPE0_BLK_LEN_V1 != 4));
+
+    ASSERT(!has_acpi_ff(currd));
+
+    switch ( port )
+    {
+    case ACPI_PM1A_EVT_BLK_ADDRESS_V1 ...
+         ACPI_PM1A_EVT_BLK_ADDRESS_V1 + ACPI_PM1A_EVT_BLK_LEN - 1:
+        reg = currd->arch.hvm_domain.acpi_io.pm1a;
+        mask = pm1a_mask;
+        break;
+
+    case ACPI_GPE0_BLK_ADDRESS_V1 ...
+         ACPI_GPE0_BLK_ADDRESS_V1 + ACPI_GPE0_BLK_LEN_V1 - 1:
+        reg = currd->arch.hvm_domain.acpi_io.gpe;
+        mask = gpe0_mask;
+        break;
+
+    case XEN_ACPI_CPU_MAP ...
+         XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN - 1:
+        is_cpu_map = true;
+        break;
+
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    if ( bytes == 0 )
+        return X86EMUL_OKAY;
+
+    /* Accesses must not run past the end of a 4-byte register block. */
+    if ( !is_cpu_map && ((port & 3) + bytes > 4) )
+        return X86EMUL_UNHANDLEABLE;
+
+    if ( dir == IOREQ_READ )
+    {
+        if ( is_cpu_map )
+        {
+            unsigned int first_byte = port - XEN_ACPI_CPU_MAP;
+
+            /*
+             * Clear the bits we are about to read into, in case we
+             * copy fewer than @bytes below.
+             */
+            *val &= (~((1ULL << (bytes * 8)) - 1)) & 0xffffffff;
+
+            if ( ((currd->max_vcpus + 7) / 8) > first_byte )
+            {
+                memcpy(val, (uint8_t *)currd->avail_vcpus + first_byte,
+                       min(bytes, ((currd->max_vcpus + 7) / 8) - first_byte));
+            }
+        }
+        else
+            memcpy(val, &reg[port & 3], bytes);
+    }
+    else
+    {
+        unsigned int idx = port & 3;
+        unsigned int i;
+        uint8_t *ptr;
+
+        if ( is_cpu_map )
+            /*
+             * The CPU map is only read (by the DSDT's PRSC method) and
+             * should never be written by a guest.
+             */
+            return X86EMUL_UNHANDLEABLE;
+
+        ptr = (uint8_t *)val;
+        for ( i = 0; i < bytes; i++, idx++ )
+        {
+            if ( idx < 2 ) /* status byte: write 1 to clear. */
+                reg[idx] &= ~(mask[idx] & ptr[i]);
+            else           /* enable byte. */
+                reg[idx] |= (mask[idx] & ptr[i]);
+        }
+    }
+
+    return X86EMUL_OKAY;
 }
 
 void hvm_ioreq_init(struct domain *d)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index f34d784..f492a2b 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -87,6 +87,12 @@ struct hvm_domain {
     } ioreq_server;
     struct hvm_ioreq_server *default_ioreq_server;
 
+    /* Emulated ACPI registers for PVH guests. */
+    struct {
+        uint8_t pm1a[ACPI_PM1A_EVT_BLK_LEN];
+        uint8_t gpe[ACPI_GPE0_BLK_LEN_V1];
+    } acpi_io;
+
     /* Cached CF8 for guest PCI config cycles */
     uint32_t                pci_cf8;
 
-- 
2.7.4
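
For completeness, a guest-eye sketch of the interface added above. The
port values, the fake_ports array and the inb/outb stubs are assumptions
for the sake of a self-contained example; a real guest reaches these
registers from AML (e.g. the PRSC method), not from C:

    #include <stdint.h>
    #include <stdio.h>

    /* Fake I/O-port space standing in for real guest inb/outb (assumption). */
    static uint8_t fake_ports[0x10000];
    static uint8_t inb(uint16_t port) { return fake_ports[port]; }
    static void outb(uint8_t val, uint16_t port) { fake_ports[port] = val; }

    /* Illustrative values only; the real constants live in the Xen headers. */
    #define XEN_ACPI_CPU_MAP         0xaf00
    #define ACPI_GPE0_BLK_ADDRESS_V1 0xafe0
    #define XEN_GPE0_CPUHP_BIT       2

    static int cpu_present(unsigned int cpu)
    {
        return (inb(XEN_ACPI_CPU_MAP + cpu / 8) >> (cpu % 8)) & 1;
    }

    static void ack_cpu_hotplug(void)
    {
        /* GPE0 status byte: write 1 to clear; only masked bits take effect. */
        if ( inb(ACPI_GPE0_BLK_ADDRESS_V1) & (1U << XEN_GPE0_CPUHP_BIT) )
            outb(1U << XEN_GPE0_CPUHP_BIT, ACPI_GPE0_BLK_ADDRESS_V1);
    }

    int main(void)
    {
        fake_ports[XEN_ACPI_CPU_MAP] = 0x03;                 /* vCPUs 0, 1 */
        fake_ports[ACPI_GPE0_BLK_ADDRESS_V1] = 1U << XEN_GPE0_CPUHP_BIT;

        printf("vCPU1 present: %d\n", cpu_present(1));
        ack_cpu_hotplug();  /* in a real guest the hypervisor clears the bit */
        return 0;
    }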