[PATCH v2 06/12] page-alloc: make scrub_one_page() static
Before starting to alter its properties, restrict the function's
visibility. The only external user is mem-paging, which we can
accommodate by different means. Also move the function up in its
source file, so we won't need to forward-declare it. Constify its
parameter at the same time.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: New.

--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -316,9 +316,6 @@ static int evict(struct domain *d, gfn_t
     ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
                         p2m_ram_paged, a);
 
-    /* Clear content before returning the page to Xen */
-    scrub_one_page(page);
-
     /* Track number of paged gfns */
     atomic_inc(&d->paged_pages);
 
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -136,6 +136,7 @@
 #include <xen/numa.h>
 #include <xen/nodemask.h>
 #include <xen/event.h>
+#include <xen/vm_event.h>
 #include <public/sysctl.h>
 #include <public/sched.h>
 #include <asm/page.h>
@@ -757,6 +758,21 @@ static void page_list_add_scrub(struct p
 #endif
 #define SCRUB_BYTE_PATTERN (SCRUB_PATTERN & 0xff)
 
+static void scrub_one_page(const struct page_info *pg)
+{
+    if ( unlikely(pg->count_info & PGC_broken) )
+        return;
+
+#ifndef NDEBUG
+    /* Avoid callers relying on allocations returning zeroed pages. */
+    unmap_domain_page(memset(__map_domain_page(pg),
+                             SCRUB_BYTE_PATTERN, PAGE_SIZE));
+#else
+    /* For a production build, clear_page() is the fastest way to scrub. */
+    clear_domain_page(_mfn(page_to_mfn(pg)));
+#endif
+}
+
 static void poison_one_page(struct page_info *pg)
 {
 #ifdef CONFIG_SCRUB_DEBUG
@@ -2431,10 +2447,12 @@ void free_domheap_pages(struct page_info
         /*
          * Normally we expect a domain to clear pages before freeing them,
          * if it cares about the secrecy of their contents. However, after
-         * a domain has died we assume responsibility for erasure. We do
-         * scrub regardless if option scrub_domheap is set.
+         * a domain has died or if it has mem-paging enabled we assume
+         * responsibility for erasure. We do scrub regardless if option
+         * scrub_domheap is set.
          */
-        scrub = d->is_dying || scrub_debug || opt_scrub_domheap;
+        scrub = d->is_dying || mem_paging_enabled(d) ||
+                scrub_debug || opt_scrub_domheap;
     }
     else
     {
@@ -2519,21 +2537,6 @@ static __init int pagealloc_keyhandler_i
 
 __initcall(pagealloc_keyhandler_init);
 
-void scrub_one_page(struct page_info *pg)
-{
-    if ( unlikely(pg->count_info & PGC_broken) )
-        return;
-
-#ifndef NDEBUG
-    /* Avoid callers relying on allocations returning zeroed pages. */
-    unmap_domain_page(memset(__map_domain_page(pg),
-                             SCRUB_BYTE_PATTERN, PAGE_SIZE));
-#else
-    /* For a production build, clear_page() is the fastest way to scrub. */
-    clear_domain_page(_mfn(page_to_mfn(pg)));
-#endif
-}
-
 static void dump_heap(unsigned char key)
 {
     s_time_t now = NOW();
--- a/xen/include/asm-x86/mem_paging.h
+++ b/xen/include/asm-x86/mem_paging.h
@@ -24,12 +24,6 @@
 
 int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg);
 
-#ifdef CONFIG_MEM_PAGING
-# define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging)
-#else
-# define mem_paging_enabled(d) false
-#endif
-
 #endif /*__ASM_X86_MEM_PAGING_H__ */
 
 /*
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -498,8 +498,6 @@ static inline unsigned int get_order_fro
     return order;
 }
 
-void scrub_one_page(struct page_info *);
-
 #ifndef arch_free_heap_page
 #define arch_free_heap_page(d, pg)      \
     page_list_del(pg, page_to_list(d, pg))
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -1117,6 +1117,12 @@ static always_inline bool is_iommu_enabl
     return evaluate_nospec(d->options & XEN_DOMCTL_CDF_iommu);
}
 
+#ifdef CONFIG_MEM_PAGING
+# define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging)
+#else
+# define mem_paging_enabled(d) false
+#endif
+
 extern bool sched_smt_power_savings;
 extern bool sched_disable_smt_switching;
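As an illustration for readers not building Xen, below is a minimal,
self-contained C sketch of the two behaviours this patch touches: the
debug-versus-production choice inside the now-static scrub_one_page(),
and the widened scrub condition in free_domheap_pages(). Everything in
it (PAGE_SIZE, the poison byte, the cut-down struct domain, and the
should_scrub() helper) is a placeholder standing in for the Xen
equivalents, not hypervisor code:

/* Sketch: debug builds poison freed pages so callers that wrongly
 * assume zeroed allocations get caught; production builds just clear
 * the page, which is the fastest way to scrub. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096          /* placeholder; Xen's value is per-arch */
#define SCRUB_BYTE_PATTERN 0xc2 /* placeholder poison byte */

struct domain {                 /* cut-down stand-in for Xen's struct domain */
    bool is_dying;
    bool paging_enabled;        /* stands in for mem_paging_enabled(d) */
};

/* Mirrors the widened condition in free_domheap_pages(): scrub on
 * behalf of a dying domain or one with mem-paging enabled, or when
 * scrubbing is forced (scrub_debug / opt_scrub_domheap). */
static bool should_scrub(const struct domain *d, bool forced)
{
    return d->is_dying || d->paging_enabled || forced;
}

/* Mirrors the #ifndef NDEBUG choice in scrub_one_page(). */
static void scrub_page(uint8_t *page)
{
#ifndef NDEBUG
    /* Avoid callers relying on allocations returning zeroed pages. */
    memset(page, SCRUB_BYTE_PATTERN, PAGE_SIZE);
#else
    /* For a production build, clearing is the fastest way to scrub. */
    memset(page, 0, PAGE_SIZE);
#endif
}

int main(void)
{
    static uint8_t page[PAGE_SIZE];
    struct domain d = { .is_dying = false, .paging_enabled = true };

    if ( should_scrub(&d, false) )
        scrub_page(page);
    printf("first byte after free: %#x\n", page[0]);
    return 0;
}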