[Xen-devel] [PATCH 2/2] x86/p2m: make p2m_alloc_ptp() return an MFN
None of the callers really needs the struct page_info pointer.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
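
[Illustrative note, not part of the patch: with this change, callers test the returned mfn_t against INVALID_MFN via mfn_eq() instead of comparing a struct page_info pointer against NULL. The following is a minimal, self-contained sketch of that pattern; mfn_t, _mfn(), mfn_x(), mfn_eq() and INVALID_MFN are stand-in definitions modeled loosely on Xen's typesafe MFN wrapper, and alloc_ptp_model() is a hypothetical placeholder for p2m_alloc_ptp(), not the real code.]

/*
 * Minimal, self-contained model of the typed-MFN return convention --
 * NOT the actual Xen definitions, just an illustration of why an mfn_t
 * plus an INVALID_MFN sentinel can replace the old NULL-pointer check.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long mfn; } mfn_t;   /* stand-in for Xen's typesafe mfn_t */

static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }
static inline bool mfn_eq(mfn_t a, mfn_t b) { return a.mfn == b.mfn; }

#define INVALID_MFN _mfn(~0UL)

/* Hypothetical stand-in for p2m_alloc_ptp(): INVALID_MFN signals failure. */
static mfn_t alloc_ptp_model(bool fail)
{
    return fail ? INVALID_MFN : _mfn(0x1234);
}

int main(void)
{
    mfn_t mfn = alloc_ptp_model(false);

    if ( mfn_eq(mfn, INVALID_MFN) )   /* replaces the old "if ( pg == NULL )" */
        return -1;

    printf("got mfn %#lx\n", mfn_x(mfn));
    return 0;
}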
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -569,7 +569,7 @@ int p2m_set_entry(struct p2m_domain *p2m
     return rc;
 }
 
-struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
 {
     struct page_info *pg;
 
@@ -577,13 +577,13 @@ struct page_info *p2m_alloc_ptp(struct p
     ASSERT(p2m->domain);
     ASSERT(p2m->domain->arch.paging.alloc_page);
     pg = p2m->domain->arch.paging.alloc_page(p2m->domain);
-    if (pg == NULL)
-        return NULL;
+    if ( !pg )
+        return INVALID_MFN;
 
     page_list_add_tail(pg, &p2m->pages);
     pg->u.inuse.type_info = type | 1 | PGT_validated;
 
-    return pg;
+    return page_to_mfn(pg);
 }
 
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
@@ -609,7 +609,7 @@ void p2m_free_ptp(struct p2m_domain *p2m
  */
 int p2m_alloc_table(struct p2m_domain *p2m)
 {
-    struct page_info *p2m_top;
+    mfn_t top_mfn;
     struct domain *d = p2m->domain;
     int rc = 0;
 
@@ -632,14 +632,14 @@ int p2m_alloc_table(struct p2m_domain *p
 
     P2M_PRINTK("allocating p2m table\n");
 
-    p2m_top = p2m_alloc_ptp(p2m, PGT_l4_page_table);
-    if ( p2m_top == NULL )
+    top_mfn = p2m_alloc_ptp(p2m, PGT_l4_page_table);
+    if ( mfn_eq(top_mfn, INVALID_MFN) )
     {
         p2m_unlock(p2m);
         return -ENOMEM;
     }
 
-    p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
+    p2m->phys_table = pagetable_from_mfn(top_mfn);
 
     if ( hap_enabled(d) )
         iommu_share_p2m_table(d);
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -225,16 +225,16 @@ static void ept_p2m_type_to_flags(struct
 /* Fill in middle levels of ept table */
 static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
 {
-    struct page_info *pg;
+    mfn_t mfn;
     ept_entry_t *table;
     unsigned int i;
 
-    pg = p2m_alloc_ptp(p2m, 0);
-    if ( pg == NULL )
+    mfn = p2m_alloc_ptp(p2m, 0);
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return 0;
 
     ept_entry->epte = 0;
-    ept_entry->mfn = page_to_mfn(pg);
+    ept_entry->mfn = mfn_x(mfn);
     ept_entry->access = p2m->default_access;
 
     ept_entry->r = ept_entry->w = ept_entry->x = 1;
@@ -243,7 +243,7 @@ static int ept_set_middle_entry(struct p
 
     ept_entry->suppress_ve = 1;
 
-    table = __map_domain_page(pg);
+    table = map_domain_page(mfn);
 
     for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
         table[i].suppress_ve = 1;
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -195,7 +195,7 @@ p2m_next_level(struct p2m_domain *p2m, v
     l1_pgentry_t *p2m_entry;
     l1_pgentry_t new_entry;
     void *next;
-    struct page_info *pg;
+    mfn_t mfn;
     unsigned int i, flags;
     unsigned long pfn;
 
@@ -206,12 +206,11 @@ p2m_next_level(struct p2m_domain *p2m, v
     /* PoD/paging: Not present doesn't imply empty. */
     if ( !l1e_get_flags(*p2m_entry) )
     {
-        pg = p2m_alloc_ptp(p2m, type);
-        if ( pg == NULL )
+        mfn = p2m_alloc_ptp(p2m, type);
+        if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW);
+        new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
 
         switch ( type ) {
         case PGT_l3_page_table:
@@ -239,11 +238,11 @@ p2m_next_level(struct p2m_domain *p2m, v
     /* split 1GB pages into 2MB pages */
     if ( type == PGT_l2_page_table && (flags & _PAGE_PSE) )
     {
-        pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
-        if ( pg == NULL )
+        mfn = p2m_alloc_ptp(p2m, PGT_l2_page_table);
+        if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
-        l1_entry = __map_domain_page(pg);
+        l1_entry = map_domain_page(mfn);
         for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
         {
             new_entry = l1e_from_pfn(pfn | (i * L1_PAGETABLE_ENTRIES), flags);
@@ -251,8 +250,7 @@ p2m_next_level(struct p2m_domain *p2m, v
             p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
         }
         unmap_domain_page(l1_entry);
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW); /* disable PSE */
+        new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
         p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
     }
@@ -261,8 +259,8 @@ p2m_next_level(struct p2m_domain *p2m, v
     /* split single 2MB large page into 4KB page in P2M table */
     if ( type == PGT_l1_page_table && (flags & _PAGE_PSE) )
    {
-        pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
-        if ( pg == NULL )
+        mfn = p2m_alloc_ptp(p2m, PGT_l1_page_table);
+        if ( mfn_eq(mfn, INVALID_MFN) )
             return -ENOMEM;
 
         /* New splintered mappings inherit the flags of the old superpage,
@@ -272,7 +270,7 @@ p2m_next_level(struct p2m_domain *p2m, v
         else
            flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
 
-        l1_entry = __map_domain_page(pg);
+        l1_entry = map_domain_page(mfn);
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
             new_entry = l1e_from_pfn(pfn | i, flags);
@@ -281,8 +279,7 @@ p2m_next_level(struct p2m_domain *p2m, v
         }
         unmap_domain_page(l1_entry);
 
-        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
-                                 P2M_BASE_FLAGS | _PAGE_RW);
+        new_entry = l1e_from_pfn(mfn_x(mfn), P2M_BASE_FLAGS | _PAGE_RW);
         p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
     }
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -684,7 +684,7 @@ void p2m_mem_paging_resume(struct domain
  * Internal functions, only called by other p2m code
  */
 
-struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
+mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need
Attachment: x86-p2m-alloc-ptp-MFN.patch