[Xen-changelog] [linux-2.6.18-xen] [LINUX] gnttab: Fix copy_grant_page race with seqlock
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1181210648 -3600
# Node ID a395e58bd23404cdcf470b029daf2b3a271d7da2
# Parent dd861cfb5d1acbc200a31c40a320f0d52b0d0c82
[LINUX] gnttab: Fix copy_grant_page race with seqlock

Previously gnttab_copy_grant_page would always unmap the grant table
entry, even if DMA operations were outstanding. This would allow a
hostile guest to free a page still used by DMA to the hypervisor.

This patch fixes this by making sure that we don't free the grant table
entry if a DMA operation has taken place. To achieve this a seqlock is
used to synchronise the DMA operations and copy_grant_page.

The DMA operations use the read side of the seqlock so performance
should be largely unaffected.

Thanks to Isaku Yamahata for noticing the race condition.

Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
---
 drivers/xen/core/gnttab.c |   90 ++++++++++++++++++++++++----------------------
 1 files changed, 48 insertions(+), 42 deletions(-)

diff -r dd861cfb5d1a -r a395e58bd234 drivers/xen/core/gnttab.c
--- a/drivers/xen/core/gnttab.c	Wed Jun 06 20:28:05 2007 +0100
+++ b/drivers/xen/core/gnttab.c	Thu Jun 07 11:04:08 2007 +0100
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/seqlock.h>
 #include <xen/interface/xen.h>
 #include <xen/gnttab.h>
 #include <asm/pgtable.h>
@@ -62,6 +63,8 @@ static struct grant_entry *shared;
 static struct grant_entry *shared;
 
 static struct gnttab_free_callback *gnttab_free_callback_list;
+
+static DEFINE_SEQLOCK(gnttab_dma_lock);
 
 static int gnttab_expand(unsigned int req_entries);
 
@@ -492,11 +495,6 @@ static int gnttab_map(unsigned int start
 
 static void gnttab_page_free(struct page *page)
 {
-	if (page->mapping) {
-		put_page((struct page *)page->mapping);
-		page->mapping = NULL;
-	}
-
 	ClearPageForeign(page);
 	gnttab_reset_grant_page(page);
 	put_page(page);
@@ -538,8 +536,33 @@ int gnttab_copy_grant_page(grant_ref_t r
 	mfn = pfn_to_mfn(pfn);
 	new_mfn = virt_to_mfn(new_addr);
 
+	write_seqlock(&gnttab_dma_lock);
+
+	/* Make seq visible before checking page_mapped. */
+	smp_mb();
+
+	/* Has the page been DMA-mapped? */
+	if (unlikely(page_mapped(page))) {
+		write_sequnlock(&gnttab_dma_lock);
+		put_page(new_page);
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		set_phys_to_machine(pfn, new_mfn);
+
+	gnttab_set_replace_op(&unmap, (unsigned long)addr,
+			      (unsigned long)new_addr, ref);
+
+	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
+					&unmap, 1);
+	BUG_ON(err);
+	BUG_ON(unmap.status);
+
+	write_sequnlock(&gnttab_dma_lock);
+
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-		set_phys_to_machine(pfn, new_mfn);
 		set_phys_to_machine(page_to_pfn(new_page),
 				    INVALID_P2M_ENTRY);
 		mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
@@ -548,14 +571,6 @@ int gnttab_copy_grant_page(grant_ref_t r
 		BUG_ON(err);
 	}
 
-	gnttab_set_replace_op(&unmap, (unsigned long)addr,
-			      (unsigned long)new_addr, ref);
-
-	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
-					&unmap, 1);
-	BUG_ON(err);
-	BUG_ON(unmap.status);
-
 	new_page->mapping = page->mapping;
 	new_page->index = page->index;
 	set_bit(PG_foreign, &new_page->flags);
@@ -564,22 +579,9 @@ int gnttab_copy_grant_page(grant_ref_t r
 	SetPageForeign(page, gnttab_page_free);
 	page->mapping = NULL;
 
-	/*
-	 * Ensure that there is a barrier between setting the p2m entry
-	 * and checking the map count. See gnttab_dma_map_page.
-	 */
-	smp_mb();
-
-	/* Has the page been DMA-mapped? */
-	if (unlikely(page_mapped(page))) {
-		err = -EBUSY;
-		page->mapping = (void *)new_page;
-	}
-
 out:
 	put_page(page);
 	return err;
-
 }
 EXPORT_SYMBOL(gnttab_copy_grant_page);
 
@@ -593,23 +595,27 @@ EXPORT_SYMBOL(gnttab_copy_grant_page);
  */
 maddr_t gnttab_dma_map_page(struct page *page)
 {
-	maddr_t mfn = pfn_to_mfn(page_to_pfn(page)), mfn2;
+	maddr_t maddr = page_to_bus(page);
+	unsigned int seq;
 
 	if (!PageForeign(page))
-		return mfn << PAGE_SHIFT;
-
-	if (mfn_to_local_pfn(mfn) < max_mapnr)
-		return mfn << PAGE_SHIFT;
-
-	atomic_set(&page->_mapcount, 0);
-
-	/* This barrier corresponds to the one in gnttab_copy_grant_page. */
-	smp_mb();
-
-	/* Has this page been copied in the mean time? */
-	mfn2 = pfn_to_mfn(page_to_pfn(page));
-
-	return mfn2 << PAGE_SHIFT;
+		return maddr;
+
+	do {
+		seq = read_seqbegin(&gnttab_dma_lock);
+		maddr = page_to_bus(page);
+
+		/* Has it become a local MFN? */
+		if (pfn_valid(mfn_to_local_pfn(maddr >> PAGE_SHIFT)))
+			break;
+
+		atomic_set(&page->_mapcount, 0);
+
+		/* Make _mapcount visible before read_seqretry. */
+		smp_mb();
+	} while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
+
+	return maddr;
 }
 
 int gnttab_resume(void)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
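As a quick orientation for readers of the archive, the synchronisation pattern this changeset introduces can be distilled into the sketch below. This is an editor's illustration, not code from the patch: the identifiers example_lock, writer_replace and reader_map are invented for the sketch, the include list is an assumption, and the snippet is only meant to make sense inside a 2.6.18-xen style tree (for maddr_t and page_to_bus()), not to stand alone.

/*
 * Illustrative sketch of the seqlock pattern used above.  Names other
 * than the seqlock/page APIs are hypothetical.
 */
#include <linux/errno.h>
#include <linux/mm.h>		/* struct page, page_mapped() */
#include <linux/seqlock.h>	/* seqlock_t, read/write helpers */
#include <asm/io.h>		/* page_to_bus() in the 2.6.18-xen tree */

static DEFINE_SEQLOCK(example_lock);

/* Write side, as in gnttab_copy_grant_page(): decide under the lock. */
static int writer_replace(struct page *page)
{
	int err = 0;

	write_seqlock(&example_lock);

	/* Make the bumped sequence count visible before the check. */
	smp_mb();

	if (page_mapped(page)) {
		/* A DMA mapping is outstanding: keep the grant entry. */
		err = -EBUSY;
	} else {
		/* Safe point: GNTTABOP_unmap_and_replace would go here. */
	}

	write_sequnlock(&example_lock);
	return err;
}

/* Read side, as in gnttab_dma_map_page(): lock-free, retried on a race. */
static maddr_t reader_map(struct page *page)
{
	unsigned int seq;
	maddr_t maddr;

	do {
		seq = read_seqbegin(&example_lock);
		maddr = page_to_bus(page);

		/* Record the DMA mapping so the writer will see it... */
		atomic_set(&page->_mapcount, 0);

		/* ...and make it visible before re-reading the sequence. */
		smp_mb();
	} while (read_seqretry(&example_lock, seq));

	return maddr;
}

The point of the design is that the hot DMA-map path takes only the read side of the seqlock: it never blocks, it merely retries if copy_grant_page ran concurrently, which is why the changeset can claim that DMA performance is largely unaffected.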