[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] arm: Actually free __init/__initdata ranges on boot
# HG changeset patch # User Tim Deegan <tim@xxxxxxx> # Date 1331651459 0 # Node ID a9f64424f76fd8df233297589e6070f2f557251c # Parent 4e448a7d4c0e9e5a034539a4590751aa4c28902a arm: Actually free __init/__initdata ranges on boot Signed-off-by: Tim Deegan <tim@xxxxxxx> Committed-by: Ian Campbell <ian.campbell@xxxxxxxxxx> --- diff -r 4e448a7d4c0e -r a9f64424f76f xen/arch/arm/mm.c --- a/xen/arch/arm/mm.c Tue Mar 13 15:10:58 2012 +0000 +++ b/xen/arch/arm/mm.c Tue Mar 13 15:10:59 2012 +0000 @@ -48,6 +48,8 @@ unsigned long max_page; +extern char __init_begin[], __init_end[]; + /* Map a 4k page in a fixmap entry */ void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes) { @@ -205,17 +207,7 @@ /* Undo the temporary map */ pte.bits = 0; write_pte(xen_second + second_table_offset(dest_va), pte); - /* - * Have removed a mapping previously used for .text. Flush everything - * for safety. - */ - asm volatile ( - "dsb;" /* Ensure visibility of PTE write */ - STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */ - STORE_CP32(0, BPIALL) /* Flush branch predictor */ - "dsb;" /* Ensure completion of TLB+BP flush */ - "isb;" - : : "r" (i /*dummy*/) : "memory"); + flush_xen_text_tlb(); /* Link in the fixmap pagetable */ pte = mfn_to_xen_entry((((unsigned long) xen_fixmap) + phys_offset) @@ -251,13 +243,7 @@ pte.pt.table = 1; write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte); /* Have changed a mapping used for .text. Flush everything for safety. */ - asm volatile ( - "dsb;" /* Ensure visibility of PTE write */ - STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */ - STORE_CP32(0, BPIALL) /* Flush branch predictor */ - "dsb;" /* Ensure completion of TLB+BP flush */ - "isb;" - : : "r" (i /*dummy*/) : "memory"); + flush_xen_text_tlb(); /* From now on, no mapping may be both writable and executable. 
*/ WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR); @@ -328,6 +314,64 @@ frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pages * sizeof(struct page_info)); } +enum mg { mg_clear, mg_ro, mg_rw, mg_rx }; +static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg) +{ + lpae_t pte; + int i; + + ASSERT(is_kernel(p) && is_kernel(p + l)); + + /* Can only guard in page granularity */ + ASSERT(!((unsigned long) p & ~PAGE_MASK)); + ASSERT(!(l & ~PAGE_MASK)); + + for ( i = (p - _start) / PAGE_SIZE; + i < (p + l - _start) / PAGE_SIZE; + i++ ) + { + pte = xen_xenmap[i]; + switch ( mg ) + { + case mg_clear: + pte.pt.valid = 0; + break; + case mg_ro: + pte.pt.valid = 1; + pte.pt.pxn = 1; + pte.pt.xn = 1; + pte.pt.ro = 1; + break; + case mg_rw: + pte.pt.valid = 1; + pte.pt.pxn = 1; + pte.pt.xn = 1; + pte.pt.ro = 0; + break; + case mg_rx: + pte.pt.valid = 1; + pte.pt.pxn = 0; + pte.pt.xn = 0; + pte.pt.ro = 1; + break; + } + write_pte(xen_xenmap + i, pte); + } + flush_xen_text_tlb(); +} + +/* Release all __init and __initdata ranges to be reused */ +void free_init_memory(void) +{ + paddr_t pa = virt_to_maddr(__init_begin); + unsigned long len = __init_end - __init_begin; + set_pte_flags_on_range(__init_begin, len, mg_rw); + memset(__init_begin, 0xcc, len); + set_pte_flags_on_range(__init_begin, len, mg_clear); + init_domheap_pages(pa, pa + len); + printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10); +} + void arch_dump_shared_mem_info(void) { } diff -r 4e448a7d4c0e -r a9f64424f76f xen/arch/arm/setup.c --- a/xen/arch/arm/setup.c Tue Mar 13 15:10:58 2012 +0000 +++ b/xen/arch/arm/setup.c Tue Mar 13 15:10:59 2012 +0000 @@ -38,8 +38,6 @@ #include <asm/setup.h> #include "gic.h" -extern const char __init_begin[], __init_end[], __bss_start[]; - /* Spinlock for serializing CPU bringup */ unsigned long __initdata boot_gate = 1; /* Number of non-boot CPUs ready to enter C */ @@ -47,12 +45,7 @@ static __attribute_used__ void init_done(void) { - /* 
TODO: free (or page-protect) the init areas. - memset(__init_begin, 0xcc, __init_end - __init_begin); - free_xen_data(__init_begin, __init_end); - */ - printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10); - + free_init_memory(); startup_cpu_idle_loop(); } diff -r 4e448a7d4c0e -r a9f64424f76f xen/include/asm-arm/mm.h --- a/xen/include/asm-arm/mm.h Tue Mar 13 15:10:58 2012 +0000 +++ b/xen/include/asm-arm/mm.h Tue Mar 13 15:10:59 2012 +0000 @@ -278,6 +278,10 @@ #define memguard_guard_stack(_p) ((void)0) #define memguard_guard_range(_p,_l) ((void)0) #define memguard_unguard_range(_p,_l) ((void)0) + +/* Release all __init and __initdata ranges to be reused */ +void free_init_memory(void); + int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn, unsigned int order); diff -r 4e448a7d4c0e -r a9f64424f76f xen/include/asm-arm/page.h --- a/xen/include/asm-arm/page.h Tue Mar 13 15:10:58 2012 +0000 +++ b/xen/include/asm-arm/page.h Tue Mar 13 15:10:59 2012 +0000 @@ -203,6 +203,22 @@ } /* + * Flush all hypervisor mappings from the TLB and branch predictor. + * This is needed after changing Xen code mappings. + */ +static inline void flush_xen_text_tlb(void) +{ + register unsigned long r0 asm ("r0"); + asm volatile ( + "dsb;" /* Ensure visibility of PTE writes */ + STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */ + STORE_CP32(0, BPIALL) /* Flush branch predictor */ + "dsb;" /* Ensure completion of TLB+BP flush */ + "isb;" + : : "r" (r0) /*dummy*/ : "memory"); +} + +/* * Flush all hypervisor mappings from the data TLB. This is not * sufficient when changing code mappings or for self modifying code. */ _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.