[Xen-changelog] Michael's hacked shadow mode linux
ChangeSet 1.1246, 2005/03/15 15:53:52+00:00, rneugeba@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

	Michael's hacked shadow mode linux

	Signed-off-by: michael.fetterman@xxxxxxxxxxxx

 Kconfig              |    6 ++++++
 i386/mm/hypervisor.c |   18 ++++++++++++++++++
 i386/mm/init.c       |    4 ++++
 i386/mm/pgtable.c    |    6 ++++++
 4 files changed, 34 insertions(+)

diff -Nru a/linux-2.6.10-xen-sparse/arch/xen/Kconfig b/linux-2.6.10-xen-sparse/arch/xen/Kconfig
--- a/linux-2.6.10-xen-sparse/arch/xen/Kconfig	2005-04-05 12:09:19 -04:00
+++ b/linux-2.6.10-xen-sparse/arch/xen/Kconfig	2005-04-05 12:09:19 -04:00
@@ -146,6 +146,12 @@
 	  we only use this for benchmarking
 	  enable only if you know what you are doing
 
+config XEN_SHADOW_MODE
+	bool "Fake shadow mode"
+	default n
+	help
+	  fakes out a shadow mode kernel
+
 config XEN_SCRUB_PAGES
 	bool "Scrub memory before freeing it to Xen"
diff -Nru a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c	2005-04-05 12:09:19 -04:00
+++ b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c	2005-04-05 12:09:19 -04:00
@@ -125,6 +125,7 @@
 
 void queue_l1_entry_update(pte_t *ptr, unsigned long val)
 {
+#ifndef CONFIG_XEN_SHADOW_MODE
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
@@ -137,10 +138,15 @@
     __flush_page_update_queue();
 #endif
     spin_unlock_irqrestore(&update_lock, flags);
+#else
+    _flush_page_update_queue();
+    *(unsigned long *)ptr = val;
+#endif
 }
 
 void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
+#ifndef CONFIG_XEN_SHADOW_MODE
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
@@ -150,6 +156,10 @@
     per_cpu(update_queue[idx], cpu).val = val;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
+#else
+    _flush_page_update_queue();
+    *(unsigned long *)ptr = val;
+#endif
 }
 
 void queue_pt_switch(unsigned long ptr)
@@ -278,6 +288,7 @@
 /* queue and flush versions of the above */
 void xen_l1_entry_update(pte_t *ptr, unsigned long val)
 {
+#ifndef CONFIG_XEN_SHADOW_MODE
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
@@ -287,10 +298,14 @@
     per_cpu(update_queue[idx], cpu).val = val;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
+#else
+    *(unsigned long *)ptr = val;
+#endif
 }
 
 void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
+#ifndef CONFIG_XEN_SHADOW_MODE
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
@@ -300,6 +315,9 @@
     per_cpu(update_queue[idx], cpu).val = val;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
+#else
+    *(unsigned long *)ptr = val;
+#endif
 }
 
 void xen_pt_switch(unsigned long ptr)
diff -Nru a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c
--- a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c	2005-04-05 12:09:19 -04:00
+++ b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c	2005-04-05 12:09:19 -04:00
@@ -77,7 +77,9 @@
 {
     if (pmd_none(*pmd)) {
         pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+#ifndef CONFIG_XEN_SHADOW_MODE
         make_page_readonly(page_table);
+#endif
         set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
         if (page_table != pte_offset_kernel(pmd, 0))
             BUG();
@@ -349,7 +351,9 @@
      * it. We clean up by write-enabling and then freeing the old page dir.
      */
     memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
+#ifndef CONFIG_XEN_SHADOW_MODE
     make_page_readonly(new_pgd);
+#endif
     queue_pgd_pin(__pa(new_pgd));
     load_cr3(new_pgd);
     queue_pgd_unpin(__pa(old_pgd));
diff -Nru a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c
--- a/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c	2005-04-05 12:09:19 -04:00
+++ b/linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c	2005-04-05 12:09:19 -04:00
@@ -181,7 +181,9 @@
     pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
     if (pte) {
         clear_page(pte);
+#ifndef CONFIG_XEN_SHADOW_MODE
         make_page_readonly(pte);
+#endif
         xen_flush_page_update_queue();
     }
     return pte;
@@ -194,7 +196,9 @@
 
         set_page_count(page, 1);
         clear_page(pte);
+#ifndef CONFIG_XEN_SHADOW_MODE
         make_page_readonly(pte);
+#endif
         queue_pte_pin(__pa(pte));
         flush_page_update_queue();
     }
@@ -304,7 +308,9 @@
     spin_unlock_irqrestore(&pgd_lock, flags);
     memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
  out:
+#ifndef CONFIG_XEN_SHADOW_MODE
     make_page_readonly(pgd);
+#endif
     queue_pgd_pin(__pa(pgd));
     flush_page_update_queue();
 }
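For readers skimming the diff, here is a minimal, self-contained user-space C sketch of the pattern the patch introduces: when CONFIG_XEN_SHADOW_MODE is set, page-table updates bypass the batched update queue and become plain memory writes (and the make_page_readonly() calls are skipped). The names below (fake_pte_t, queue_update(), flush_queue(), l1_entry_update()) are hypothetical stand-ins for the kernel's queue_l1_entry_update() and its per-CPU update queue, not the real Xen/Linux API.

    /*
     * Sketch of the CONFIG_XEN_SHADOW_MODE pattern: without it, updates
     * go through a small "queue" that is flushed later (in the real code
     * the flush is a hypercall to Xen); with it, the entry is written
     * directly, on the assumption that the hypervisor's shadow page-table
     * code will observe and propagate the write.
     *
     * fake_pte_t, queue_update, flush_queue and l1_entry_update are
     * illustrative stand-ins only.
     */
    #include <stdio.h>

    typedef unsigned long fake_pte_t;

    #ifndef CONFIG_XEN_SHADOW_MODE
    /* Queued path: remember the update, then apply it on flush. */
    static fake_pte_t *queued_ptr;
    static unsigned long queued_val;
    static int queue_len;

    static void queue_update(fake_pte_t *ptr, unsigned long val)
    {
        queued_ptr = ptr;       /* the real code keeps a per-CPU array */
        queued_val = val;
        queue_len = 1;
    }

    static void flush_queue(void)
    {
        if (queue_len) {
            *queued_ptr = queued_val;  /* real code would hypercall here */
            queue_len = 0;
        }
    }

    static void l1_entry_update(fake_pte_t *ptr, unsigned long val)
    {
        queue_update(ptr, val);
        flush_queue();
    }
    #else
    /* Shadow-mode path: write the entry directly, no queue, no flush. */
    static void l1_entry_update(fake_pte_t *ptr, unsigned long val)
    {
        *ptr = val;
    }
    #endif

    int main(void)
    {
        fake_pte_t pte = 0;

        l1_entry_update(&pte, 0x1000UL | 0x1UL);  /* frame + present bit */
        printf("pte = %#lx\n", pte);
        return 0;
    }

Compiling the sketch with and without -DCONFIG_XEN_SHADOW_MODE exercises both branches. In the actual patch the queued path ultimately issues MMU-update requests to Xen, while the shadow-mode path writes the entry directly and leaves the page tables writable, presumably so that Xen's shadow page-table machinery can trap and mirror the guest's writes itself.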