|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 1/5] x86/pv: Switch {fill, zap}_ro_mpt() to using mfn_t
And update all affected callers. Fix the fill_ro_mpt() prototype to be bool
like its implementation.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
xen/arch/x86/domain.c | 6 +++---
xen/arch/x86/mm.c | 12 ++++++------
xen/arch/x86/mm/shadow/multi.c | 4 ++--
xen/include/asm-x86/mm.h | 4 ++--
4 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 9b4b959..57c44b1 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -967,7 +967,7 @@ int arch_set_info_guest(
{
if ( (page->u.inuse.type_info & PGT_type_mask) ==
PGT_l4_page_table )
- done = !fill_ro_mpt(page_to_mfn(page));
+ done = !fill_ro_mpt(_mfn(page_to_mfn(page)));
page_unlock(page);
}
@@ -1041,7 +1041,7 @@ int arch_set_info_guest(
case 0:
if ( !compat && !VM_ASSIST(d, m2p_strict) &&
!paging_mode_refcounts(d) )
- fill_ro_mpt(cr3_gfn);
+ fill_ro_mpt(_mfn(cr3_gfn));
break;
default:
if ( cr3_page == current->arch.old_guest_table )
@@ -1080,7 +1080,7 @@ int arch_set_info_guest(
break;
case 0:
if ( VM_ASSIST(d, m2p_strict) )
- zap_ro_mpt(cr3_gfn);
+ zap_ro_mpt(_mfn(cr3_gfn));
break;
}
}
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index b6d6ae3..79780da 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1616,9 +1616,9 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
}
-bool fill_ro_mpt(unsigned long mfn)
+bool fill_ro_mpt(mfn_t mfn)
{
- l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+ l4_pgentry_t *l4tab = map_domain_page(mfn);
bool ret = false;
if ( !l4e_get_intpte(l4tab[l4_table_offset(RO_MPT_VIRT_START)]) )
@@ -1632,9 +1632,9 @@ bool fill_ro_mpt(unsigned long mfn)
return ret;
}
-void zap_ro_mpt(unsigned long mfn)
+void zap_ro_mpt(mfn_t mfn)
{
- l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+ l4_pgentry_t *l4tab = map_domain_page(mfn);
l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
unmap_domain_page(l4tab);
@@ -2820,7 +2820,7 @@ int new_guest_cr3(unsigned long mfn)
invalidate_shadow_ldt(curr, 0);
if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
- fill_ro_mpt(mfn);
+ fill_ro_mpt(_mfn(mfn));
curr->arch.guest_table = pagetable_from_pfn(mfn);
update_cr3(curr);
@@ -3194,7 +3194,7 @@ long do_mmuext_op(
}
if ( VM_ASSIST(currd, m2p_strict) )
- zap_ro_mpt(op.arg1.mfn);
+ zap_ro_mpt(_mfn(op.arg1.mfn));
}
curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index f0eabb6..c5c0af8 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4232,10 +4232,10 @@ sh_update_cr3(struct vcpu *v, int do_locking)
mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);
if ( !(v->arch.flags & TF_kernel_mode) && VM_ASSIST(d, m2p_strict) )
- zap_ro_mpt(mfn_x(smfn));
+ zap_ro_mpt(smfn);
else if ( (v->arch.flags & TF_kernel_mode) &&
!VM_ASSIST(d, m2p_strict) )
- fill_ro_mpt(mfn_x(smfn));
+ fill_ro_mpt(smfn);
}
#else
#error This should never happen
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index b738c89..5760e05 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -331,8 +331,8 @@ int free_page_type(struct page_info *page, unsigned long type,
void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
bool_t zap_ro_mpt);
-bool_t fill_ro_mpt(unsigned long mfn);
-void zap_ro_mpt(unsigned long mfn);
+bool fill_ro_mpt(mfn_t mfn);
+void zap_ro_mpt(mfn_t mfn);
bool is_iomem_page(mfn_t mfn);
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |