
[xen stable-4.16] x86/HVM: drop stdvga's "lock" struct member



commit 1eb843cd2401ea8c228d8e9061521751d5c7df44
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Nov 12 14:14:43 2024 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Nov 12 14:14:43 2024 +0100

    x86/HVM: drop stdvga's "lock" struct member
    
    No state is left to protect. As the lock was the struct's last
    remaining field, drop the struct itself as well. Likewise drop the
    .complete handler, which would otherwise be left empty.
    
    This is part of XSA-463 / CVE-2024-45818
    
    Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    (cherry picked from commit b180a50326c8a2c171f37c1940a0fbbdcad4be90)
---
 xen/arch/x86/hvm/stdvga.c        | 30 ++----------------------------
 xen/include/asm-x86/hvm/domain.h |  1 -
 xen/include/asm-x86/hvm/io.h     |  4 ----
 3 files changed, 2 insertions(+), 33 deletions(-)
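
For orientation before the diff: prior to this change, stdvga_mem_accept()
returned to its caller with the per-domain stdvga lock still held, and the
paired .complete handler existed solely to release it. A sketch of that
pre-patch shape, assembled from the lines the diff below removes (Xen-internal
code, shown for illustration, not standalone):

    static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
                                    const ioreq_t *p)
    {
        struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;

        /* Range check done lock-free to avoid deadlock (see comment in diff). */
        if ( (ioreq_mmio_first_byte(p) < VGA_MEM_BASE) ||
             (ioreq_mmio_last_byte(p) >= (VGA_MEM_BASE + VGA_MEM_SIZE)) )
            return 0;

        spin_lock(&s->lock);

        if ( p->dir != IOREQ_WRITE || p->data_is_ptr || p->count != 1 )
            goto reject;   /* only single direct writes are accelerated */

        /* s->lock intentionally held */
        return 1;

     reject:
        spin_unlock(&s->lock);
        return 0;
    }

    static void stdvga_mem_complete(const struct hvm_io_handler *handler)
    {
        struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;

        spin_unlock(&s->lock);
    }

With no state left behind the lock, accept() can simply return true/false
and both the lock and the .complete handler disappear, as the patch does.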

diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 3890d69c82..3c5340ad92 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -69,61 +69,35 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
 static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
                                 const ioreq_t *p)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
-
-    /*
-     * The range check must be done without taking the lock, to avoid
-     * deadlock when hvm_mmio_internal() is called from
-     * hvm_copy_to/from_guest_phys() in hvm_process_io_intercept().
-     */
     if ( (ioreq_mmio_first_byte(p) < VGA_MEM_BASE) ||
          (ioreq_mmio_last_byte(p) >= (VGA_MEM_BASE + VGA_MEM_SIZE)) )
         return 0;
 
-    spin_lock(&s->lock);
-
     if ( p->dir != IOREQ_WRITE || p->data_is_ptr || p->count != 1 )
     {
         /*
          * Only accept single direct writes, as that's the only thing we can
          * accelerate using buffered ioreq handling.
          */
-        goto reject;
+        return false;
     }
 
-    /* s->lock intentionally held */
-    return 1;
-
- reject:
-    spin_unlock(&s->lock);
-    return 0;
-}
-
-static void stdvga_mem_complete(const struct hvm_io_handler *handler)
-{
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
-
-    spin_unlock(&s->lock);
+    return true;
 }
 
 static const struct hvm_io_ops stdvga_mem_ops = {
     .accept = stdvga_mem_accept,
     .read = stdvga_mem_read,
     .write = stdvga_mem_write,
-    .complete = stdvga_mem_complete
 };
 
 void stdvga_init(struct domain *d)
 {
-    struct hvm_hw_stdvga *s = &d->arch.hvm.stdvga;
     struct hvm_io_handler *handler;
 
     if ( !has_vvga(d) )
         return;
 
-    memset(s, 0, sizeof(*s));
-    spin_lock_init(&s->lock);
-    
     /* VGA memory */
     handler = hvm_next_io_handler(d);
     if ( handler )
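
For readability, the accept handler as it reads after this hunk, assembled
from the context and added lines above:

    static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
                                    const ioreq_t *p)
    {
        if ( (ioreq_mmio_first_byte(p) < VGA_MEM_BASE) ||
             (ioreq_mmio_last_byte(p) >= (VGA_MEM_BASE + VGA_MEM_SIZE)) )
            return 0;

        if ( p->dir != IOREQ_WRITE || p->data_is_ptr || p->count != 1 )
        {
            /*
             * Only accept single direct writes, as that's the only thing we
             * can accelerate using buffered ioreq handling.
             */
            return false;
        }

        return true;
    }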
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 698455444e..1e5d7db6ed 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -83,7 +83,6 @@ struct hvm_domain {
     struct hvm_hw_vpic     vpic[2]; /* 0=master; 1=slave */
     struct hvm_vioapic    **vioapic;
     unsigned int           nr_vioapics;
-    struct hvm_hw_stdvga   stdvga;
 
     /*
      * hvm_hw_pmtimer is a publicly-visible name. We will defer renaming
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 2f5bfeccae..784bd0c83f 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -121,10 +121,6 @@ struct vpci_arch_msix_entry {
     int pirq;
 };
 
-struct hvm_hw_stdvga {
-    spinlock_t lock;
-};
-
 void stdvga_init(struct domain *d);
 
 extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
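
As an aside on the comment removed at the top of the first hunk: accept()
returning with a lock held, to be dropped only later in complete(), is what
forced the range check to run lock-free, since a nested accept() on the same
non-recursive lock would self-deadlock. A minimal toy in plain C of that
handshake and its re-entrancy hazard (not Xen code; the address window and
all names here are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static bool accept_io(unsigned long addr)
    {
        /* Range check deliberately done before taking the lock. */
        if ( addr < 0xA0000UL || addr >= 0xC0000UL )  /* illustrative window */
            return false;

        pthread_mutex_lock(&lock);
        /* Lock intentionally held on the accept path... */
        return true;
    }

    static void complete_io(void)
    {
        pthread_mutex_unlock(&lock);  /* ...and only released here. */
    }

    int main(void)
    {
        if ( accept_io(0xA1234UL) )
        {
            /*
             * Calling accept_io() again here, before complete_io(), would
             * block forever on the non-recursive mutex: the deadlock the
             * removed comment warned about.
             */
            complete_io();
        }
        printf("accept/complete handshake done\n");
        return 0;
    }

Compile with "cc -pthread"; the program runs to completion because the nested
call is only described, not made.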
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.16