[Xen-devel] [PATCH 13/17] x86/hvm: only acquire RAM pages for emulation when we need to
If hvmemul_do_io_addr() is called to complete a previously issued
emulation, then there is no need to acquire the RAM pages again. There
is also no need to re-calculate the value of *reps, provided
hvmemul_do_io() updates it when returning X86EMUL_OKAY.
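
To illustrate the shape of the change, here is a minimal,
self-contained sketch of the dispatch this patch introduces. The state
and return-value names mirror the patch; do_io_addr() and everything
else are hypothetical stand-ins for illustration, not the real Xen
code:

    #include <stdio.h>

    /* Hypothetical stand-ins; the names mirror the patch, the
     * values here are arbitrary. */
    enum ioreq_state { STATE_IOREQ_NONE, STATE_IORESP_READY };
    enum { X86EMUL_OKAY, X86EMUL_UNHANDLEABLE };

    /* Sketch: do the expensive setup (page acquisition, clipping
     * *reps to the page) only on first issue; a re-issued request
     * completing an earlier emulation skips it. */
    static int do_io_addr(enum ioreq_state state, unsigned long *reps)
    {
        switch ( state )
        {
        case STATE_IOREQ_NONE:
            /* First issue: acquire the RAM page(s), clip *reps. */
            printf("first issue: acquire pages, %lu reps\n", *reps);
            break;
        case STATE_IORESP_READY:
            /* Completion: pages already held, *reps already
             * clipped; go straight to the I/O. */
            printf("completion: skip acquisition\n");
            break;
        default:
            return X86EMUL_UNHANDLEABLE;
        }
        /* ... hand off to the I/O itself, which updates *reps ... */
        return X86EMUL_OKAY;
    }

    int main(void)
    {
        unsigned long reps = 4;
        do_io_addr(STATE_IOREQ_NONE, &reps);   /* first issue */
        do_io_addr(STATE_IORESP_READY, &reps); /* re-issued completion */
        return 0;
    }

On the STATE_IORESP_READY path the function falls straight through to
the I/O, which is why hvmemul_do_io() must now update *reps itself
when returning X86EMUL_OKAY.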
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/hvm/emulate.c | 86 ++++++++++++++++++++++++--------------------
1 file changed, 48 insertions(+), 38 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index d91cd74..ab7c716 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -145,7 +145,6 @@ static int hvmemul_do_io(
         ASSERT(p.type == is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO);
         ASSERT(p.addr == addr);
         ASSERT(p.size == size);
-        ASSERT(p.count == *reps);
         ASSERT(p.dir == dir);
         ASSERT(p.df == df);
         ASSERT(p.data_is_ptr == data_is_addr);
@@ -189,6 +188,7 @@ static int hvmemul_do_io(
             }
         }
 
+        *reps = p.count;
         return X86EMUL_OKAY;
     default:
         return X86EMUL_UNHANDLEABLE;
@@ -285,56 +285,66 @@ int hvmemul_do_io_addr(
     uint8_t dir, bool_t df, paddr_t ram_addr)
 {
     struct vcpu *v = current;
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
     unsigned long ram_gmfn = paddr_to_pfn(ram_addr);
     struct page_info *ram_page[2];
     int nr_pages = 0;
     unsigned long count;
     int rc;
 
-    rc = hvmemul_acquire_page(ram_gmfn, &ram_page[nr_pages]);
-    if ( rc != X86EMUL_OKAY )
-        goto out;
-
-    nr_pages++;
-
-    /* Detemine how many reps will fit within this page */
-    for ( count = 0; count < *reps; count++ )
+    switch ( vio->io_req.state )
     {
-        paddr_t start, end;
+    case STATE_IOREQ_NONE:
+        rc = hvmemul_acquire_page(ram_gmfn, &ram_page[nr_pages]);
+        if ( rc != X86EMUL_OKAY )
+            goto out;
 
-        if ( df )
-        {
-            start = ram_addr - count * size;
-            end = ram_addr + size - 1;
-        }
-        else
+        nr_pages++;
+
+        /* Determine how many reps will fit within this page */
+        for ( count = 0; count < *reps; count++ )
         {
-            start = ram_addr;
-            end = ram_addr + (count + 1) * size - 1;
-        }
+            paddr_t start, end;
 
-        if ( paddr_to_pfn(start) != ram_gmfn ||
-             paddr_to_pfn(end) != ram_gmfn )
-            break;
-    }
+            if ( df )
+            {
+                start = ram_addr - count * size;
+                end = ram_addr + size - 1;
+            }
+            else
+            {
+                start = ram_addr;
+                end = ram_addr + (count + 1) * size - 1;
+            }
 
-    if ( count == 0 )
-    {
-        /*
-         * This access must span two pages, so grab a reference to
-         * the next page and do a single rep.
-         */
-        rc = hvmemul_acquire_page(df ? ram_gmfn - 1 : ram_gmfn + 1,
-                                  &ram_page[nr_pages]);
-        if ( rc != X86EMUL_OKAY )
-            goto out;
+            if ( paddr_to_pfn(start) != ram_gmfn ||
+                 paddr_to_pfn(end) != ram_gmfn )
+                break;
+        }
 
-        nr_pages++;
-        count = 1;
-    }
+        if ( count == 0 )
+        {
+            /*
+             * This access must span two pages, so grab a reference
+             * to the next page and do a single rep.
+             */
+            rc = hvmemul_acquire_page(df ? ram_gmfn - 1 : ram_gmfn + 1,
+                                      &ram_page[nr_pages]);
+            if ( rc != X86EMUL_OKAY )
+                goto out;
+
+            nr_pages++;
+            count = 1;
+        }
 
-    v->arch.hvm_vcpu.hvm_io.io_retry = (count < *reps);
-    *reps = count;
+        v->arch.hvm_vcpu.hvm_io.io_retry = (count < *reps);
+        *reps = count;
+        break;
+    case STATE_IORESP_READY:
+        break;
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
 
     rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 1,
                        (uint64_t)ram_addr);
--
1.7.10.4