[xen master] x86/HVM: reduce recursion in linear_{read,write}()
commit 18053054b7583810dd356efc8d7018bbc8720f36
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Sep 9 13:40:47 2024 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Sep 9 13:40:47 2024 +0200
x86/HVM: reduce recursion in linear_{read,write}()
Let's make explicit what the compiler may or may not do on our behalf:
in each of the two functions, the 2nd of the recursive invocations can
fall through to the rest of the function rather than re-invoking it.
This will save us from adding yet another parameter (or more) to the
function, just for the recursive invocations.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/hvm/emulate.c | 28 ++++++++++++++++++----------
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index feb4792cc5..ecf83795fa 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1146,7 +1146,7 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
pagefault_info_t pfinfo;
struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
unsigned int offset = addr & ~PAGE_MASK;
- int rc = HVMTRANS_bad_gfn_to_mfn;
+ int rc;
if ( offset + bytes > PAGE_SIZE )
{
@@ -1154,12 +1154,16 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
/* Split the access at the page boundary. */
rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
- if ( rc == X86EMUL_OKAY )
- rc = linear_read(addr + part1, bytes - part1, p_data + part1,
- pfec, hvmemul_ctxt);
- return rc;
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ addr += part1;
+ bytes -= part1;
+ p_data += part1;
}
+ rc = HVMTRANS_bad_gfn_to_mfn;
+
/*
 * If there is an MMIO cache entry for the access then we must be re-issuing
 * an access that was previously handled as MMIO. Thus it is imperative that
@@ -1201,7 +1205,7 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
pagefault_info_t pfinfo;
struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
unsigned int offset = addr & ~PAGE_MASK;
- int rc = HVMTRANS_bad_gfn_to_mfn;
+ int rc;
if ( offset + bytes > PAGE_SIZE )
{
@@ -1209,12 +1213,16 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
/* Split the access at the page boundary. */
rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
- if ( rc == X86EMUL_OKAY )
- rc = linear_write(addr + part1, bytes - part1, p_data + part1,
- pfec, hvmemul_ctxt);
- return rc;
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ addr += part1;
+ bytes -= part1;
+ p_data += part1;
}
+ rc = HVMTRANS_bad_gfn_to_mfn;
+
/*
 * If there is an MMIO cache entry for the access then we must be re-issuing
 * an access that was previously handled as MMIO. Thus it is imperative that
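
For illustration, the resulting control flow boils down to the minimal,
compilable sketch below (not the Xen code itself): read_page(), the flat
mem[] array, and the plain 0 return value standing in for X86EMUL_OKAY are
invented stand-ins, and the real functions' pagefault and MMIO-cache
handling is omitted. A page-crossing access is split once: the first part
is handled by a single recursive call, which by construction ends exactly
at the page boundary and hence cannot recurse further, while the remainder
is handled by falling through with addr/bytes/p_data advanced.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Hypothetical flat backing store standing in for guest memory. */
static unsigned char mem[3 * PAGE_SIZE];

/* Stand-in for the non-recursive "access within one page" path. */
static int read_page(unsigned long addr, unsigned int bytes, void *p_data)
{
    memcpy(p_data, &mem[addr], bytes);
    return 0; /* X86EMUL_OKAY in the real code */
}

static int linear_read(unsigned long addr, unsigned int bytes, void *p_data)
{
    unsigned int offset = addr & ~PAGE_MASK;
    int rc;

    /* As in the emulator, accesses are assumed to span at most two pages. */
    if ( offset + bytes > PAGE_SIZE )
    {
        unsigned int part1 = PAGE_SIZE - offset;

        /* Split the access at the page boundary. */
        rc = linear_read(addr, part1, p_data);
        if ( rc != 0 )
            return rc;

        /* Fall through for the remainder instead of recursing again. */
        addr += part1;
        bytes -= part1;
        p_data = (unsigned char *)p_data + part1;
    }

    return read_page(addr, bytes, p_data);
}

int main(void)
{
    unsigned char buf[8];

    memset(mem, 0xab, sizeof(mem));
    /* An 8-byte read straddling the first page boundary. */
    if ( linear_read(PAGE_SIZE - 4, sizeof(buf), buf) == 0 )
        printf("read ok, buf[0]=%#x\n", buf[0]);
    return 0;
}

With the old shape of the code, eliminating the 2nd call relied on the
compiler's tail-call optimization; written this way, the recursion depth
is bounded at one regardless of optimization level.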
--
generated by git-patchbot for /home/xen/git/xen.git#master