|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v4 1/2] xen/mm: Introduce arch_free_heap_page()
common/page_alloc.c references d->arch.relmem_list, which only exists on x86.
This only compiles on ARM because page_list_del2() discards its second
argument.
Introduce a new common arch_free_heap_page() which only uses common lists in
struct domain, and allow an architecture to override this with a custom
alternative. x86 then provides a custom arch_free_heap_page() which takes
care of managing d->arch.relmem_list.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Stefano Stabellini <stefano.stabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
xen/common/page_alloc.c | 4 ++--
xen/include/asm-x86/mm.h | 5 +++++
xen/include/xen/mm.h | 6 ++++++
3 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7179d67..22e8feb 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1837,7 +1837,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
spin_lock_recursive(&d->page_alloc_lock);
for ( i = 0; i < (1 << order); i++ )
- page_list_del2(&pg[i], &d->xenpage_list, &d->arch.relmem_list);
+ arch_free_heap_page(d, &pg[i]);
d->xenheap_pages -= 1 << order;
drop_dom_ref = (d->xenheap_pages == 0);
@@ -1856,7 +1856,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
for ( i = 0; i < (1 << order); i++ )
{
BUG_ON((pg[i].u.inuse.type_info & PGT_count_mask) != 0);
- page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
+ arch_free_heap_page(d, &pg[i]);
}
drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 23a4092..195cbcb 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -590,4 +590,9 @@ typedef struct mm_rwlock {
const char *locker_function; /* func that took it */
} mm_rwlock_t;
+#define arch_free_heap_page(d, pg) \
+ page_list_del2(pg, is_xen_heap_page(pg) ? \
+ &(d)->xenpage_list : &(d)->page_list, \
+ &(d)->arch.relmem_list)
+
#endif /* __ASM_X86_MM_H__ */
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index a795dd6..681e6c1 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -443,6 +443,12 @@ static inline unsigned int get_order_from_pages(unsigned long nr_pages)
void scrub_one_page(struct page_info *);
+#ifndef arch_free_heap_page
+#define arch_free_heap_page(d, pg) \
+ page_list_del(pg, is_xen_heap_page(pg) ? \
+ &(d)->xenpage_list : &(d)->page_list)
+#endif
+
int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
domid_t foreign_domid,
unsigned long idx, xen_pfn_t gpfn);
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace.