[Xen-devel] [PATCH] x86/pv: Minor improvements to guest_get_eff_{,kern}_l1e()
* These functions work in terms of linear addresses, not virtual addresses.
Update the comments and parameter names.
* Drop unnecessary inlines.
* Drop vcpu parameter from guest_get_eff_kern_l1e(). Its sole caller passes
current, and its callee strictly operates on current.
* Switch guest_get_eff_kern_l1e()'s parameter from void * to l1_pgentry_t *.
Both its caller and callee already use the correct type.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
---
xen/arch/x86/mm.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 3262499..8993e6d 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -561,15 +561,15 @@ static inline void guest_unmap_l1e(void *p)
     unmap_domain_page(p);
 }
 
-/* Read a PV guest's l1e that maps this virtual address. */
-static inline void guest_get_eff_l1e(unsigned long addr, l1_pgentry_t *eff_l1e)
+/* Read a PV guest's l1e that maps this linear address. */
+static void guest_get_eff_l1e(unsigned long linear, l1_pgentry_t *eff_l1e)
 {
     ASSERT(!paging_mode_translate(current->domain));
     ASSERT(!paging_mode_external(current->domain));
 
-    if ( unlikely(!__addr_ok(addr)) ||
+    if ( unlikely(!__addr_ok(linear)) ||
          __copy_from_user(eff_l1e,
-                          &__linear_l1_table[l1_linear_offset(addr)],
+                          &__linear_l1_table[l1_linear_offset(linear)],
                           sizeof(l1_pgentry_t)) )
         *eff_l1e = l1e_empty();
 }
@@ -578,18 +578,18 @@ static inline void guest_get_eff_l1e(unsigned long addr, l1_pgentry_t *eff_l1e)
  * Read the guest's l1e that maps this address, from the kernel-mode
  * page tables.
  */
-static inline void guest_get_eff_kern_l1e(struct vcpu *v, unsigned long addr,
-                                          void *eff_l1e)
+static void guest_get_eff_kern_l1e(unsigned long linear, l1_pgentry_t *eff_l1e)
 {
-    const bool user_mode = !(v->arch.flags & TF_kernel_mode);
+    struct vcpu *curr = current;
+    const bool user_mode = !(curr->arch.flags & TF_kernel_mode);
 
     if ( user_mode )
-        toggle_guest_mode(v);
+        toggle_guest_mode(curr);
 
-    guest_get_eff_l1e(addr, eff_l1e);
+    guest_get_eff_l1e(linear, eff_l1e);
 
     if ( user_mode )
-        toggle_guest_mode(v);
+        toggle_guest_mode(curr);
 }
 
 static inline void page_set_tlbflush_timestamp(struct page_info *page)
@@ -676,7 +676,7 @@ int map_ldt_shadow_page(unsigned int off)
     if ( is_pv_32bit_domain(d) )
         gva = (u32)gva;
 
-    guest_get_eff_kern_l1e(v, gva, &l1e);
+    guest_get_eff_kern_l1e(gva, &l1e);
 
     if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
         return 0;
--
2.1.4