[Xen-devel] [PATCH v3 10/18] x86/hvm: unify stdvga mmio intercept with standard mmio intercept
It's clear from the following check in hvmemul_rep_movs:
    if ( sp2mt == p2m_mmio_direct || dp2mt == p2m_mmio_direct ||
         (sp2mt == p2m_mmio_dm && dp2mt == p2m_mmio_dm) )
        return X86EMUL_UNHANDLEABLE;
that mmio <-> mmio copy is not handled. This means the code in the
stdvga mmio intercept that explicitly handles mmio <-> mmio copies when
hvm_copy_to/from_guest_phys() fails will never be executed.

This patch therefore adds a check in hvmemul_do_io_addr() to explicitly
disallow mmio <-> mmio transfers, and registers standard mmio intercept ops
in stdvga_init().

With this patch, all mmio and portio handled within Xen now go through
process_io_intercept().
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
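Note: the sketch below is illustrative only and is not part of the applied
diff. It restates the invariant the new hunk in hvmemul_acquire_page()
enforces: since hvmemul_rep_movs() (quoted above) already bounces any
mmio <-> mmio combination to the device model, finding an MMIO p2m type by
the time a page is acquired for emulation can only mean the emulator has
gone wrong, hence the domain_crash(). The example_* wrapper is a
placeholder invented for illustration, not a Xen function; header includes
are as in xen/arch/x86/hvm/emulate.c.

    /* Illustrative sketch only -- mirrors the check added below. */
    static int example_reject_mmio_page(struct domain *d, p2m_type_t p2mt,
                                        struct page_info *page)
    {
        /* Reaching here with a non-RAM (MMIO) gmfn indicates an emulation bug. */
        if ( p2m_is_mmio(p2mt) )
        {
            domain_crash(d);   /* emulation state is inconsistent */
            put_page(page);    /* drop the reference taken by the caller */
            return X86EMUL_UNHANDLEABLE;
        }

        return X86EMUL_OKAY;
    }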
xen/arch/x86/hvm/emulate.c | 9 +++
xen/arch/x86/hvm/intercept.c | 7 --
xen/arch/x86/hvm/stdvga.c | 173 +++++++++---------------------------------
xen/include/asm-x86/hvm/io.h | 1 -
4 files changed, 44 insertions(+), 146 deletions(-)
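Note: the sketch below is also illustrative only and not part of the
applied diff. It shows the shape of the standard mmio intercept interface
that stdvga is moved onto, using the same .check/.read/.write callback
signatures as the stdvga_mem_* handlers in the stdvga.c hunks below. The
example_* names, the EXAMPLE_MMIO_BASE/SIZE range and the init hook are
placeholders invented for illustration; header includes are as in
xen/arch/x86/hvm/stdvga.c.

    #define EXAMPLE_MMIO_BASE 0xfed00000UL   /* placeholder range */
    #define EXAMPLE_MMIO_SIZE 0x1000UL

    static int example_mmio_check(struct vcpu *v, unsigned long addr,
                                  unsigned long length)
    {
        /* Claim only accesses falling entirely inside the emulated range. */
        return (addr >= EXAMPLE_MMIO_BASE) &&
               ((addr + length) <= (EXAMPLE_MMIO_BASE + EXAMPLE_MMIO_SIZE));
    }

    static int example_mmio_read(struct vcpu *v, unsigned long addr,
                                 unsigned long size, unsigned long *p_data)
    {
        *p_data = 0;            /* device-specific read emulation goes here */
        return X86EMUL_OKAY;
    }

    static int example_mmio_write(struct vcpu *v, unsigned long addr,
                                  unsigned long size, unsigned long data)
    {
        /* device-specific write emulation goes here */
        return X86EMUL_OKAY;
    }

    static const struct hvm_mmio_ops example_mmio_ops = {
        .check = example_mmio_check,
        .read  = example_mmio_read,
        .write = example_mmio_write
    };

    /* Registered once per domain from the device's init path: */
    void example_mmio_init(struct domain *d)
    {
        register_mmio_handler(d, &example_mmio_ops);
    }

Once registered, accesses that the .check callback claims are routed
through hvm_io_intercept()/hvm_find_io_handler() like any other handled
range, which is what allows the stdvga-specific dispatch in intercept.c
to go away.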
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 91e57b0..7d6c0eb 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -267,6 +267,15 @@ static int hvmemul_acquire_page(unsigned long gmfn, struct page_info **page)
return X86EMUL_RETRY;
}
+ /* This code should not be reached if the gmfn is not RAM */
+ if ( p2m_is_mmio(p2mt) )
+ {
+ domain_crash(curr_d);
+
+ put_page(*page);
+ return X86EMUL_UNHANDLEABLE;
+ }
+
return X86EMUL_OKAY;
}
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 5633959..625e585 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -279,13 +279,6 @@ int hvm_io_intercept(ioreq_t *p)
{
struct hvm_io_handler *handler;
- if ( p->type == IOREQ_TYPE_COPY )
- {
- int rc = stdvga_intercept_mmio(p);
- if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
- return rc;
- }
-
handler = hvm_find_io_handler(p);
if ( handler == NULL )
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index dcd532a..639da6a 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -275,9 +275,10 @@ static uint8_t stdvga_mem_readb(uint64_t addr)
return ret;
}
-static uint64_t stdvga_mem_read(uint64_t addr, uint64_t size)
+static int stdvga_mem_read(struct vcpu *v, unsigned long addr,
+ unsigned long size, unsigned long *p_data)
{
- uint64_t data = 0;
+ unsigned long data = 0;
switch ( size )
{
@@ -313,7 +314,9 @@ static uint64_t stdvga_mem_read(uint64_t addr, uint64_t size)
break;
}
- return data;
+ *p_data = data;
+
+ return X86EMUL_OKAY;
}
static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
@@ -424,8 +427,17 @@ static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
}
}
-static void stdvga_mem_write(uint64_t addr, uint64_t data, uint64_t size)
+static int stdvga_mem_write(struct vcpu *v, unsigned long addr,
+ unsigned long size, unsigned long data)
{
+ ioreq_t p = { .type = IOREQ_TYPE_COPY,
+ .addr = addr,
+ .size = size,
+ .count = 1,
+ .dir = IOREQ_WRITE,
+ .data = data,
+ };
+
/* Intercept mmio write */
switch ( size )
{
@@ -460,153 +472,36 @@ static void stdvga_mem_write(uint64_t addr, uint64_t data, uint64_t size)
gdprintk(XENLOG_WARNING, "invalid io size: %"PRId64"\n", size);
break;
}
-}
-
-static uint32_t read_data;
-
-static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
-{
- int i;
- uint64_t addr = p->addr;
- p2m_type_t p2mt;
- struct domain *d = current->domain;
- int step = p->df ? -p->size : p->size;
- if ( p->data_is_ptr )
- {
- uint64_t data = p->data, tmp;
-
- if ( p->dir == IOREQ_READ )
- {
- for ( i = 0; i < p->count; i++ )
- {
- tmp = stdvga_mem_read(addr, p->size);
- if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
- HVMCOPY_okay )
- {
- struct page_info *dp = get_page_from_gfn(
- d, data >> PAGE_SHIFT, &p2mt, P2M_ALLOC);
- /*
- * The only case we handle is vga_mem <-> vga_mem.
- * Anything else disables caching and leaves it to qemu-dm.
- */
- if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
- ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
- {
- if ( dp )
- put_page(dp);
- return 0;
- }
- ASSERT(!dp);
- stdvga_mem_write(data, tmp, p->size);
- }
- data += step;
- addr += step;
- }
- }
- else
- {
- for ( i = 0; i < p->count; i++ )
- {
- if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
- HVMCOPY_okay )
- {
- struct page_info *dp = get_page_from_gfn(
- d, data >> PAGE_SHIFT, &p2mt, P2M_ALLOC);
- if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
- ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
- {
- if ( dp )
- put_page(dp);
- return 0;
- }
- ASSERT(!dp);
- tmp = stdvga_mem_read(data, p->size);
- }
- stdvga_mem_write(addr, tmp, p->size);
- data += step;
- addr += step;
- }
- }
- }
- else if ( p->dir == IOREQ_WRITE )
- {
- for ( i = 0; i < p->count; i++ )
- {
- stdvga_mem_write(addr, p->data, p->size);
- addr += step;
- }
- }
- else
- {
- ASSERT(p->count == 1);
- p->data = stdvga_mem_read(addr, p->size);
- }
+ if ( !hvm_buffered_io_send(&p) )
+ return X86EMUL_UNHANDLEABLE;
- read_data = p->data;
- return 1;
+ return X86EMUL_OKAY;
}
-int stdvga_intercept_mmio(ioreq_t *p)
+static int stdvga_mem_check(struct vcpu *v, unsigned long addr,
+ unsigned long length)
{
- struct domain *d = current->domain;
- struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
- uint64_t start, end, count = p->count, size = p->size;
- int buf = 0, rc;
-
- if ( p->df )
- {
- start = (p->addr - (count - 1) * size);
- end = p->addr + size;
- }
- else
- {
- start = p->addr;
- end = p->addr + count * size;
- }
-
- if ( (start < VGA_MEM_BASE) || (end > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
- return X86EMUL_UNHANDLEABLE;
-
- if ( p->size > 8 )
- {
- gdprintk(XENLOG_WARNING, "invalid mmio size %d\n", (int)p->size);
- return X86EMUL_UNHANDLEABLE;
- }
+ struct hvm_hw_stdvga *s = &v->domain->arch.hvm_domain.stdvga;
+ int rc;
spin_lock(&s->lock);
- if ( s->stdvga && s->cache )
- {
- switch ( p->type )
- {
- case IOREQ_TYPE_COPY:
- buf = mmio_move(s, p);
- if ( !buf )
- s->cache = 0;
- break;
- default:
- gdprintk(XENLOG_WARNING, "unsupported mmio request type:%d "
- "addr:0x%04x data:0x%04x size:%d count:%d state:%d "
- "isptr:%d dir:%d df:%d\n",
- p->type, (int)p->addr, (int)p->data, (int)p->size,
- (int)p->count, p->state,
- p->data_is_ptr, p->dir, p->df);
- s->cache = 0;
- }
- }
- else
- {
- buf = (p->dir == IOREQ_WRITE);
- }
-
- rc = (buf && hvm_buffered_io_send(p));
+ rc = s->stdvga && s->cache &&
+ (addr >= VGA_MEM_BASE) &&
+ ((addr + length) < (VGA_MEM_BASE + VGA_MEM_SIZE));
spin_unlock(&s->lock);
- return rc ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE;
+ return rc;
}
+static const struct hvm_mmio_ops stdvga_mem_ops = {
+ .check = stdvga_mem_check,
+ .read = stdvga_mem_read,
+ .write = stdvga_mem_write
+};
+
void stdvga_init(struct domain *d)
{
struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
@@ -634,6 +529,8 @@ void stdvga_init(struct domain *d)
register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio);
/* Graphics registers. */
register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio);
+ /* VGA memory */
+ register_mmio_handler(d, &stdvga_mem_ops);
}
}
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index afa27bf..00b4a89 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -122,7 +122,6 @@ struct hvm_hw_stdvga {
};
void stdvga_init(struct domain *d);
-int stdvga_intercept_mmio(ioreq_t *p);
void stdvga_deinit(struct domain *d);
extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
--
1.7.10.4