[Xen-changelog] Shootdown TLB entries across all VCPUs for SMP shadow mode.
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 8952af4fc166b5b0057b42f7d48505c96630ab90
# Parent 8392d3955ed43593b0d9add61c6f2778cd27332e
Shootdown TLB entries across all VCPUs for SMP shadow mode.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 8392d3955ed4 -r 8952af4fc166 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c	Thu Nov 24 15:32:38 2005
+++ b/xen/arch/x86/shadow.c	Thu Nov 24 15:47:44 2005
@@ -1757,6 +1757,7 @@
     struct out_of_sync_entry *entry;
     int need_flush = 0;
     l1_pgentry_t *ppte, opte, npte;
+    cpumask_t other_vcpus_mask;
 
     perfc_incrc(shadow_sync_all);
 
@@ -1789,23 +1790,15 @@
         unmap_domain_page(ppte);
     }
 
-    // XXX mafetter: SMP
-    //
-    // With the current algorithm, we've gotta flush all the TLBs
-    // before we can safely continue. I don't think we want to
-    // do it this way, so I think we should consider making
-    // entirely private copies of the shadow for each vcpu, and/or
-    // possibly having a mix of private and shared shadow state
-    // (any path from a PTE that grants write access to an out-of-sync
-    // page table page needs to be vcpu private).
-    //
-#if 0 // this should be enabled for SMP guests...
-    flush_tlb_mask(cpu_online_map);
-#endif
+    /* Other VCPUs mustn't use the revoked writable mappings. */
+    other_vcpus_mask = d->cpumask;
+    cpu_clear(smp_processor_id(), other_vcpus_mask);
+    flush_tlb_mask(other_vcpus_mask);
+
+    /* Flush ourself later. */
     need_flush = 1;
 
-    // Second, resync all L1 pages, then L2 pages, etc...
-    //
+    /* Second, resync all L1 pages, then L2 pages, etc... */
     need_flush |= resync_all(d, PGT_l1_shadow);
 
 #if CONFIG_PAGING_LEVELS == 2

diff -r 8392d3955ed4 -r 8952af4fc166 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c	Thu Nov 24 15:32:38 2005
+++ b/xen/arch/x86/shadow32.c	Thu Nov 24 15:47:44 2005
@@ -2554,6 +2554,7 @@
     struct out_of_sync_entry *entry;
     int need_flush = 0;
     l1_pgentry_t *ppte, opte, npte;
+    cpumask_t other_vcpus_mask;
 
     perfc_incrc(shadow_sync_all);
 
@@ -2586,23 +2587,15 @@
         unmap_domain_page(ppte);
     }
 
-    // XXX mafetter: SMP
-    //
-    // With the current algorithm, we've gotta flush all the TLBs
-    // before we can safely continue. I don't think we want to
-    // do it this way, so I think we should consider making
-    // entirely private copies of the shadow for each vcpu, and/or
-    // possibly having a mix of private and shared shadow state
-    // (any path from a PTE that grants write access to an out-of-sync
-    // page table page needs to be vcpu private).
-    //
-#if 0 // this should be enabled for SMP guests...
-    flush_tlb_mask(cpu_online_map);
-#endif
+    /* Other VCPUs mustn't use the revoked writable mappings. */
+    other_vcpus_mask = d->cpumask;
+    cpu_clear(smp_processor_id(), other_vcpus_mask);
+    flush_tlb_mask(other_vcpus_mask);
+
+    /* Flush ourself later. */
     need_flush = 1;
 
-    // Second, resync all L1 pages, then L2 pages, etc...
-    //
+    /* Second, resync all L1 pages, then L2 pages, etc... */
     need_flush |= resync_all(d, PGT_l1_shadow);
     if ( shadow_mode_translate(d) )
         need_flush |= resync_all(d, PGT_hl2_shadow);

diff -r 8392d3955ed4 -r 8952af4fc166 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h	Thu Nov 24 15:32:38 2005
+++ b/xen/include/asm-x86/shadow.h	Thu Nov 24 15:47:44 2005
@@ -596,8 +596,8 @@
         if ( need_flush )
         {
             perfc_incrc(update_hl2e_invlpg);
-            // SMP BUG???
-            local_flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
+            flush_tlb_one_mask(v->domain->cpumask,
+                               &linear_pg_table[l1_linear_offset(va)]);
         }
     }
 }
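
For context, the shadow_sync_all() hunks above implement a two-step flush: remote
VCPUs are shot down immediately (they must not keep using the revoked writable
mappings while the resync passes rewrite PTEs), while the local flush is deferred
until every shadow level has been resynced. A minimal sketch of that pattern,
using only identifiers that appear in the patch; the surrounding shadow-mode
state and the first write-protect pass are assumed, not shown:

/* Sketch only: the shape of the shootdown introduced in shadow_sync_all(). */
static void sync_all_sketch(struct domain *d)
{
    cpumask_t other_vcpus_mask;
    int need_flush;

    /* ... first pass: write-protect out-of-sync pages, rewriting PTEs ... */

    /* Remote CPUs must stop using the revoked mappings right now. */
    other_vcpus_mask = d->cpumask;                   /* CPUs this domain ran on */
    cpu_clear(smp_processor_id(), other_vcpus_mask); /* but not ourselves */
    flush_tlb_mask(other_vcpus_mask);                /* IPI + flush remote TLBs */

    /* The local TLB is flushed once, after all resyncs complete. */
    need_flush = 1;
    need_flush |= resync_all(d, PGT_l1_shadow);
    /* ... resync L2 (and higher) shadows, then honour need_flush ... */
}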
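
The shadow.h hunk applies the same idea to a single entry: flush_tlb_one_mask()
invalidates one linear address on every CPU in the domain's cpumask, where the
old local_flush_tlb_one() only touched the current CPU (the "SMP BUG???" the
deleted comment worried about). The cpumask dance itself can be modelled in
plain C; the snippet below is a hypothetical user-space toy, not Xen code: a
cpumask becomes a bitmask, cpu_clear() drops one bit, and flush_tlb_mask()
just reports which CPUs would receive the shootdown IPI.

#include <stdio.h>

typedef unsigned long cpumask_t;                  /* toy stand-in for Xen's cpumask_t */
#define cpu_clear(cpu, mask) ((mask) &= ~(1UL << (cpu)))

static void flush_tlb_mask(cpumask_t mask)
{
    for ( int cpu = 0; mask != 0; cpu++, mask >>= 1 )
        if ( mask & 1 )
            printf("IPI: flush TLB on CPU%d\n", cpu);
}

int main(void)
{
    cpumask_t domain_cpumask = 0x0b;   /* domain has run on CPUs 0, 1, 3 */
    int self = 1;                      /* pretend this code runs on CPU1 */
    cpumask_t other_vcpus_mask;

    other_vcpus_mask = domain_cpumask;
    cpu_clear(self, other_vcpus_mask);
    flush_tlb_mask(other_vcpus_mask);  /* prints CPU0 and CPU3 only */
    /* CPU1 (ourselves) flushes later, once all resyncs are done. */
    return 0;
}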